[Binary payload removed: this section was a POSIX tar archive (owner core:core) containing the directories var/home/core/zuul-output/ and var/home/core/zuul-output/logs/, plus the gzip-compressed log file var/home/core/zuul-output/logs/kubelet.log.gz. The compressed kubelet log is binary data and cannot be reproduced as text.]
^9'6zb1nA%\]z{MeH{(OpKV>[Ɋg߿+sLZZ0XSK jfjiZ_}--X\2\0 d\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\uU\u`+H )}i#WO2qWO3W+z}ӬԯLpe 2]pG%7;.?.ٵwc5hб7$$ DA ˅{H~\-cb,/.,Ր=vkwqHU* gϊ⛴_u8Dy|K`z<>ۈ8i',·Gnjq~ZsYu`Qi_',y$#1)S4a$S7 KFڨJ6\]=ӑC rPd?YAl2*H"X糗ɧ;4A!]ߟmA/LWF7?XHpQ4 vWYZɩ=&S%+ZR*2–g}Y'dK-&cG @d!DZgv8 F6K-@&ߞ_ ez_qBNR&F78kqdUBIBAY !W5{H@X~{=0*E<:śWvQ/XqI_f?]-//m㆔զwΨǽ.ÍJ(t0Jrwfl[󬮖f88ΖE]>tk6ab M?݄?ц\g:ۉvzoCrO/ߟU>-g>OIw;ޔ)׌b> -J t 37iß$m*hCpYlv{VOHQU§A<|=nc{OP)qDbLmKy(=P5 nw>r}5?eހH7&mm?20k6as#ms>;[8c^NwpܒÉ6)-C/<_QNc&j덛x )/# x]_pɈ)}_xvvY<{\J(5SNYsAyQ=r7BPA5l3hS)/=4YzX2*}QwlSlˏ8Afu…9R=k~y7R mxZhBE*8Rsrʹ(zM@<0ճa]/'5[imwVZ٣WZU>|D[Sr yǪޓpͷ0=G(M\gI(6JFWzђ8V~OF'cz2g3QOٓ9ΛC{2'c{2'c{2'c{2'c{2'c{2'c{2'c{2'c{2'c{2'c{2/ӓ^ԓ;vz2J|3=a'#Td{2Xlԓqn>VFR>ٺA;Fd!YVPK0@[dF~*vյc|ΈT[Ҿ=!.j-7XSKه7ްɅD2/^zI-%g:j-j XZlXS9i17PNw?w}J:ʫqXͯ/?׋ok W K`*wHL/ h%z^ z^ z^ z^ z^ z^ z^ z^ z^ z^ z^ z^ xtI6WJZ.ˋr!&2>y5&Eqo)=p&ʴk>ف_fxv #qA[$=/4=j!vszmP޵e_60 u)1H-IY_ _2ٔHdEkꮪˆ9<3"c6q#c>_%fӌ}gBVX8(=.yƝ"_657veƽ^Pv7x}b6bFf5O(x! |5$6ZGoTଈM ^C 'ZK(IIth/#v Gt`ZĔxʌَv !橠v6EfIdpǒ[TjV4*i/ x*(.$3,#(lagA2xdj D9#`|`<[Q*Hc2a6qa>ᰛZnc_DQD,I5RR[ZR+L<8 qQt8 e&f*'QG.q#uzb\יMKE2*Xpq3Ġ5&hipq0xÕ 9%H!8BH Zdٴc_'VN>VaP)8yVr|?p$ _]_ _]+|uovvs:PA /SQР=’&H*3x0{ofc<78uDeiͳz>w!mȼ\6{u%$(jL0u$&O]KjWljNa׍jo Xk{$)PH6 s|TͣmvUmnL&n5i {;, zw)7vz G{ K{Ōx*xM;C'xxe,WY#1~ 2 g 7Z%i~MhwH+SO(h^1Kɽ ɠ)РCq.m9#_SBUw%:$RRO+IIXtJZ͞AѪDpj^>R^7U*8]#sC7;fZ9ƧVO/PZ9ϔVU*ө5%{% i@JEh?\dH'< CB6*| i['9Ȩ3ndgLPV;3}iKt] K YÚğw iwٌ :k,:[]i)h4xeF꘡VCc%bAJC&Z!, +U<7/F < ŝ \Xu: B FT.} 4'`,r"P'=`?DE۟jiŐ B9#VL#G$2^CXV*>aEJ!5:EPI&qigsb?.6MHTʫă%$d/0>8"Ƅ70O;Wָ)OYu~|Y9S^:z)xO3ըv+bb?a:˴KIü~=PMUMB/T4p\ٻnϷU5,}0JbXA 6SD.nU](ޗ"t=nJT&L[l l քTǥCR*L\[]6^ˤAu`n۰2GcRݚD+i{5kr^on;p,b3.NW?~4}Ɯͣ wN\܍j[Ճg\]٢wohF=ߘ~uwK.lTAqӝ Bx%kFbB)5]5ͺaa8kYTw`> >Lo1~Ecvo kyT6:}uyVW:Jgaj#-3ԇOaDnAlt?x|St}{nrkP.߾߽_~w7|sװo`]AE :@GC/_1547iL'SO^2׌|8^P&ْ_>#LsTStX?_5~*X+? f~ӞtGܢORI*n|a|Hsssu6Mߦɳj7GQc$HhR;BFtr6tS. ~>K'%sN5WY׵9 y#xo;$"GȫRk8 4@xVNO.{7vd9r"!z)3.S51xdM.c 1=qڐ8#z!5#BXj"2uTH0|pH9fgǝ5>9%Ǒ8ݹv?Mxn̦N7s6wɚ^>]b=R 6/  8S夐6RAg,~!IGYy!I;~7ʽbЉ.m>xw. DYz쵲t1U@yM>jb!i&US!:ᭆHr|tPjf#\zɃˆ`~xӖnGN1je@6B;&l97 #$ x-yA)qڣ 8 νK&^}9#;'Z"e `"u[&|:< yC)QVd)r:bWY #}q.J)c*-XJ@Jͼm\8. åh\*g*"fJEAQ2x&rg({J)S{4 A':0ebf J#pY2{E`|S;!z{[3v\m WogԂi}F| 0gWgWU'WԼ||TK }\aEy6V۰g_p.`9V]쬭#ix 5ĿS 6ePLԱr:f>y6ي_s<9$ k>褂\Kev<]j]7 Uj Uס_дYv9,Y7ܼk$W5O-mCz KF2΢˙B-ĒDm)^XFl\JK< # .Uן&9rALjJѢǚ M0V ?j"~unU*^VzYծ8]呿z3si=KY I`ԙ|UT c)s\*j%7HekXgH=LKgFR<{c)Ԛ!+/-&"51-D.o.JqT%rnS4Ew9lqJrX+_"D4ٞ]?@omaJnбpT󢧮?si^+@[OQ\gr19#|~44$1q+'q,@E _7ki|s?k&K'՜QR3%e5B9I;:;a,g+`<%9vR9hܦjd+USIf}!kMRf+=CRd" (ƖFƇ}?ß~婰Hߝ]10w^A7?1'/X>@⥁0:!t^MCH %SOOGKȒtg g}nc?߇[\/cW7ﳚ|V잮W]OgW}j[tAb>PԱjM78aTEc{nq~ul{^bO7mvƗqjrˎ[v+i=]EϷwr<\[?zv;Cd~:)E/uOǫy/㳙)yyr{wJr忹d%WE3Z*"C$&VI͂ALG4RNJl![K{ ƺVAhG|ޢM>w5GΜM }K(} aP^K)Fo&L.S֘S)?sh@ԉ]S}х18:O%VLt9a!Nt"lL|Г.<I^FnAct25"J^$15`H!I,}:% u)x`ɜpr^){5}4ԴR/ޝrO H vN!E%Cg@Hߑy AO!AH|i$_!/"ȤF&XIJ Y압JSԔ=1&Χ_InCڬz<46Df!? 
ڼ>x.y9mƔX'(kme J8ݼ+Яf_nlra4j; ස~Jf /x'P!>LxxʹVef7Z֠j̍"H)2)mȀ!:>s HRud 5T*4eyd(Uz/nzemY<~9}09/$`KtΐF C r Α3YJO  0bHzu53[ >A`HkbH0a9O\_3pEzL>ɿ,d:"j ?ɳ2q-`F-bx+-PFو4OίzBπ<_Dm,P9Qe(L9gTtCfc^% TҸŽ*a 8exޱQ;Ib2z*FLQXb5 0i)ٮ5~fK-CG PоA=.>F| je`+([4 RXV\7dBΥZJEC !1c|:0ث;gKPu%*@6uǪ_G?_Mg˨wH)!]cn_|K1:х“ǪSL"N`{2:RUJA/.Wݤ}EL5r+J ]4Nv(#2!:V''TLO6s31Jq_DSA=0#Mf:j ڴu)5yME 0'5 Dtzv@#s*0ϴQ1m6C<{\ \®>Tmc>ww̗v_Xדϳ^ Ҫvֻff |uBl$oϬtL"~{u˜$ԾE6 !C=tl6 E}W1fS窠D'[|'Rn.N- dj()r'LV1x }LKEe^@ Ϊ]"k𮷤Yn4-$d{4O)G7>2o"ǿ{&&&[,um&qsUV!I/t ߗ-ìϻ;X@oHoåIZZt{-λ7TGQ:ZuZ:ZuVz:J:Z`-fӘ/t!/#{g,:h1茇.:4uvPV&L*UVT]νQci<i]^YquIVs=`N(,Xd_J_"tE(Gs 1(@X/Pb*x4Wi6sr96̦tEA*kCQB2jQ<$t*\QQbFS DfǿVrJ8`&P|"]=ePd4&xs&m mQ gF]u}Fy0>frr:z6%׷;VeWdk{Cz_ZxdGmC>9>QJ%2 NCɒR!Z XakْsQ :\(z(U 2ʮ S]q5  g?2*Ͱd1 |Mz2-ˇ:u{E//Ao˗wza֑Kn9dg]J!s,o)) i $i[N9U*{!KbJVT&`g[XcHް\DuiVُq:*橠v38Mc6j v G*1u,M檤)J*)f*68m,;FqжPC<d !s#[2dH.&QsY$VɆ g?J:^ b38mcD"tfBgS2Id @sP+ Ef`vZ;q+d4 *[L1Vl8k 0Us@į:2..֙xNLJEq 8h~VJ*YQ"ԼvѰvdJ`r$ XX[DƈO&C+8-x '2e^FkSăݑxK7g'Y+NޏQo}c.{*;|Fpwp.pz.\u#8{^(@I+f/``ɂfS(2*oJ{Ii'@JZ^8T1R̺2I-r1[%{VDf9KMO#`u$#W~14Ovx?}-Ԕ;>H[^˝'>tnyQ!.?-_ݽct~D5$ժ$NZLRkw@x]ҀWYxo+J8'Ol6RۍtK!)(OшY@Vy MM8]9d4 Rï#4CPɺwߏLW,Yjq}_ *;o-ηx_,'?#z^?/?}/o^_>R?w\qg <ߞD#O  ǛiXYW ø@qﱏ ۬4f;~v ֯_^O{SbȎ9dgr+LҚԤAtF-_uQR2U˹DW!}LJKB%YHTyC9\/7r&pnq6RQ(ԠMϼ$j e@$S M<(SDi*5_"=&bjk#gǛ㡳 _nOIq|B-e<ۃa"n̦JPŤMuZ)p-H2Z5A&=QF7tPhFFC4GlFV[Qg:E@cKn'uFv^g}|z iu$:ƒN7V!yT7%KrCRKY3`9-nJ%w.,x@fjn(}INe7p7NJϟ54h8d"T0IC2̡yE1\s#l M {A,ԱWKgO|ߔΟ]6) s0gE3*Ϛg*rY36Y_`yVE,: Ea{5A8[BeBzNYN(z yiY&lF4)#=PIsƥdPfqƍ' xbNzcpe%)Ĩ54ѨLѺ]wuCU*lv"sLj1l?ɪ!$7,L&Fx*H))6CAesBqk֑ruHf'AدDto=w>+vd}ξui~D\ D3ɰl*댨!cT>N{k0ɕMсPm,\Wͣ|r1;! Gy^{X;9жfΫ/őP2]rqr+z5~Q,%wOo&-1aֲUΧw[ ..AURUp{ OyܝⶰG&{hbu'cDŽˏzߌ[i&Z["hVuo"گV~Uu7>ŗzʔlJ=wk73Ugj0- GicNɜF9J|Q%VG` wI!cS JiV?FjZm(]˩៺3SF:%zY]j݌J.ݥ#[::>J|gV[#Ukpz,Z XŹT4+ALrgQɠrD(1ΦVzCq.(I"M*$ <#:#|b)ÔT'Vs8cml A ]ܑ_ dk؀:*r̡_ඞhF< j_< 0u VjL@ $4)cT^6 º* ^I&DyjR.jXi 8М/ \Z]zb1J&ijb (w[ %QMnpR9&kV/k#gLj r+t퍇wMdoSNϖԃeJU?ս}CnGxUdb8}f%:ft]»w>q.ilg"76X_]^/ ?>eo} l]`ru1i{\hK6nm^ͲClY`ˢڭwiyP!hCQ-6?mCMobN1^,Ou-X2QQ9h('oj JW~HtGRƸZ)MEn1%hב:-&iegP4EY,>}khp) ,&#ozK<?WpuYRPQ9#Upz $Z4W3k21U8!r-j(j3ę!8L,PFv&i].<3w,hB-Vjfiڻ)2JѳfmnGknvKcv5F`hT. REf%,x# 5/W8XA1tZ}Ju8Tu8tu8XuϤ7ao.r3*)MLR,OXDRX ,bDRsW~Z jb#u&R-/gfcl6K$bG[*rbZ-7E-!"z |-7($L8/Ÿ7:5bùόuN#c!x/T%2( Ų'͋#q5'H\歌ͯ$-M^,[lr8Wً/ahw04VȗcDs(/s{#PY J27̞4 d0'{E\D=G9r!\Y Bd'*͔РdL0n}D)8Uyݎ۬NvswZ;to#x87IDO}ZFN*ed1*nɒ֑ePi$^$B6>% RKJ+cdhǕ !h2 VO'6Ri0$"aX-; ތP;sw{+e؎J] +xĉPXE9֠(8!ĹQLAlO=?_F=$3NYƬHhB+Ƶ*1nibZjQA*mx+SWnnK@=>x\qJe@h v@ LDHk ` IW1[QG\HãzNN*;mE2ZeJZ)Bc|ջg&H`T0n̍˚ڔ.t5{zBַd]+ Wws^Kqowz?QF.B~I'yN;B;kwW^ 9/( yC;}7"i0e2z#SL OJ<|Rr?ݎY4=L.0=6iI& VLUMOy޵6'rdٿB'{KiNĮwlÑO s($xHE:ݘJU'Ͻ<~qtip!?4W]xUyQC"q>B#ٔ>1_nyW{7lv.ԿçUm )4R~tsm_Mwrmje[ROy*kx 0륶OؤZ{!X5Ro8ocBr>Mj]*.;yKr2"GDxxKq/)(6 y9%8T' o՜&h !v,e6˴sS^r0>zxcN8Qq*IE5q#v8 :^.^eMDe g+!b8D|Hٹ 8pV^^zjser+0fB!qWr6ܯ3ŭ^9n1i%/P0! 
q;\F.f-*ZQ}1ĨbDZh=~4URsN7+7-J[B7~a[T0#4Ю)n6s$Wdn' 4 >OYAym!&|t?6 t;i]%0s}e+I ''s '+b6^t9 KbگW+_Vׁ}vdW?zz.Q M9L=ʩ]]>,ɊIGp◜^"Oz?}o:͛z)ղ /0.`Z"v%g+1Q>xn`٠t2EIQ(e(C7"0>'JKt6pU\ %g%C\A`IU2=uxJV.:z;pŨH\%U2E*Y+[KV.1\UY*=vR%iZw)/26yK秋1K;X)R34_oeqi,̿}Mb,zK [߼;V30u 䳽z'`}<<܄9SM}{9`>};{ݷff1 Rtp+*m,v|~29fkvGV|bBB?l3>)`|%+a&6{͠қxӻ\:SijûyYSS^gn'Khme1Q٫җ/jM &.?6GigOr_9C<:K^$DQU$5B G8aݜ%rLڝCtrqrqfO֎1"8#lq"ز$Bȉ6Fm JLDTvΊhB)V^孰P0T+6^ԖithiA,پN:t߆陑ͶcZĴ" A2s9ȒmAdA7xsHXs+X*W{2x1:$ؑBrTKEO%BRdO@LZ@LZ!B6R籠+KMքtExD&17֥cB)7j92Jc`;8%}HH!d3gw tm>"W#%7l)Yi}ii}sti=l*3BNw$b׾[Of A!rFBB,ZXJL!0"^}G!AHyiD^:"CGR 7F0ER$0ιfO%<8btYI,Rg2DO'"k/%MŨ8%b"XZYhe3g7 L|hHc6 &-OIm52KsJ=X¤3L-!;ǥg 6C0sԲ$x Y[ow ժGSŐrQ ^ʫYa+οy%G&b3M;DIXIX9{./p kˀe,bG"E2(*s2,jH3FKЄ0޴ ,agd!R&R/5eDST~3ĀG!e89 Φָ\?WR2;*/p`3yTn{BfӇx:p^|{_\N"ȃ.0#UJI#Q`K0@ܢAR$r=  $J΍hT8׈j%-Z: lUûEFMaQ ] -K mNiOnnqٿ}ri y`t2SV6??yXUU KY2Pa^ڦo Z_){2$C~qtip!?4W]( Wsli઼bXT烫dH6|LG޵4{sn^C`߽0g9t/J\hŢ7dq>iRG>̩$}7|avf (71W-nsO)OQ7Gk~*;܎*;ʟR/^j?x?.yo{7J JxN%3Bڭ)!rX/3ТJ5jHi;: iuݿ8:'_QJԆԃ6)zi]zY`]&XfX!g+9tupmF`o5ll^7ߕ^7ې_pUŎ<7-|[aq`D)rHQi J|6M:_'udpy_!:܉G&táuT-6ܠFx :6{LC9&`| |.;yKqdDsX?4|qo<',RQl"!zsL-ŘQ*ihrrBB*l҅y9˖%t//3SVC̬76;`eisn ܞ_R5;ʟŊTeԯFt_Y6KgL/aU|>zh۹|/p+.WHEz[tw>Z-bt i1֗â\/ \o\pGNYzNklY*Va_ W^F3\Uz=|]lU kZ:|@!xԖQA[˺Q,32i`= ugb&VMNh蕕՘!jt6k9pc T,tgRr)i2mJ>=Qb&sR4Yx;쌜&B6pY D?OYreT:ys)b1Ra^@%MF[a"c9v[k<X\d*Q"+^ɉH'!)%"pU$hXP;#aAmsMU]Z_%V3>Y7VMnIZtrpJI1oyQs _`MMp Ny^ߕNQYY< 9Ll9rXN`1B" J EoD5coIAO Wl.EIQs]HL 52vF؝vb aOOk it]4fp}ik_a.zϽfV>I2x+Hu.Je,KQZ$Κd"d)ԼSĖ(30cEBRkFh2vd g6d#g v؝0bA?ΙۂڝqǺ;FmGzGﭧ0٨#ii"#MCёb*miLb'",d I7E aML,hM#a}|*@Fu2u06F) bg."q8 kLL&X h8OSW@Y$E0NtK#ӥNLD2>(D-3%-Hg8uoW<M'\Nn^gg\&.q{\\JF^9e%+L!"w &H;P 'qg {\| \ܚ<]qǺxȻ1>Hަ{oww|21/E%'+vH9I\\ RpXm3B<\5_ L5"D8-N[i?}vJ㪕y+rx}duMjhcٌ B iC}ߎo %B}umi?vWitOf1̄dz._-oFiUwMov<)cs#G>q0ΰW7꒙i N($/@}䫆壆[:b2VyIc?1(%?aoNl'#`P͟JT'wjK᷇ջ6᜜OvKԺ-$ed;اi{"]X)A7uΤ=polm5H5`k&mZlKbvZu)U̮S Br2OWٔ4~dޙ#KLqc>g1e`2}SiB.%. H6wKIgLBY^AK15>4ɭeUWW̟R N%s@0 -`#HD,&-U/[Aca1Ǫ*ίWXWacK9ɜ`<aAXlR' )q;|ïZV$4Ic4*sʊyA柌ZSMebL!8ԭ 厨"NM4s@T~Tl3)*12)+?TJ~g/)ޅOŚ/:t!}{\-ݭ]^ʉq)qD;?>5եk@7O5(t|&Fq-d}< +EMNk4v*vMY:Q͎<Ս>˱}l^ly.1rϽӳvlH5IMoH+Yh%Pt%f= mLAO4e*>Nz8k|sjNn*V'\ꂾrZ͆tXDFJÊ篃P^}=L W6!}]RM<}y\3boN޽wߝxݻ'\ߞ;ZqPr, IFw۟Ѵijo4 MωN  ڽ#>ϱYc̞|׏?CV:hN{!l՜VT&WqMb~z0~EC_oSe!rX SY4&ؼHs~#6x}tSxğk&U,{!e0)Y66GddT V1(6so6L n?۴0-O恘bhdDK=c "b+ŒZL2s'eəV't;s~uOvwq|wV=jee\K]*LK.wls!W])\%*lsžl+,lt ~R{Bߘ!6J'S !L>ܾ&%\:ҵ<$ih#Í{}2UQ#g>Pqtd'8" }L*g1:YyID2wOģ~9?vsi/{c9=6 g:͔[ߏJ>:TX #ZI&T ݫG/ ܺF"<֢{QB(RF3a!K>H".qI8\hٗ޾k&ű:}Ln7}X3(M]/]{V*Tڽr"|S OS FR} s>I|S LF:YεZJqY"A`Nd!TґP9Kz@Q^1>~pخ[/dYZ doI>%$~2/l) -{K[*V}t ;9./WɬN%eٶ-v㘗 t+ޕo]q轄rH6UbZ˘d^qDi+ #,*AGo&]H &xY%Gy{%L,˸CDn#vӱkخx:mt,TJt|VIWVS3pE-BdWJp+\q)HE0Y/ W#WpE2p(*53W=\=b=gjI?V7BƬ\socNO&|:VYTWQ{k$k?Q55%`Ud=v9\yNհ{5t_MsqH 4?N4觅4ܝyI+4_`|4ȸPrj\{F ([Ot[£z |q>gQJ겗ѯwwr63DIinlΩMΗd,j>O'67@z fVK)} ںku9`6Yr?]X9erNr +o,T)h&I,ηMfy%tTdDOUt:6Ń$MkKuS pj]Bs١BZ(ڝZ j7۾P׸ HKUCpE0;W\]B-m+2WWR[]BWZ sK{z=pkiw`"ygઐ 7BzcP W8L}oДZM8qu鮆-bCA q<{3T*m ~׳ W_I)$ơV E hLml Bɨ`Y1#~Hm 3ib{+Tsh.sYdReRF1eeE L68!\]ug Z zpwR ~ .w?s>Kzswuy v $v_,X-y,wx$[u%YMYzZ-QT{)ꜢyexmŻa\ 3`~zwǧ?GO>dTM}7>ǛxxmM76xSoM}7>ǛxSoM}7>ǛxSog:M}7ɹ!yH omrMη69&|:Z#69&|omrMη69&|omᑥҰR|[$|omrMη69&|Y0Z1k^ ̏MYkk!$k\e#|Dh8eݢÏW,*iF%1Ψ "2͓N@Y}}qOtFY6pgD TV:f :1_J7(27`Awt=er Ez ڻNګyںw޼N/Nsz&/|{Sp`{GOT+^-א0tB®1p7qo>~H+ae{Ө]ڕz&~xzgdm2RlD-mKzx%3uKZgLze]ٺ6Hu"H<y)vF 5WWmfwꬋf1b+~#8j@Uvî呿7 t. iiV*l+ 3#\+Sbx!yM⚷cq@9l=uZ1Вk+@ T4Q{)\fYr9̒KB7!%! 
:,/li6#WBX,{M@+D)=},J[ona,u%}kןm/@^Az41O1ZQe_ZԆm; YGTJnQ6js6ȂV JL q$J%fJ Q /9R9'3UNɥdÌ7ܦ=rcp[8dՙYJ{Pe5s̺[]Z];6لwed=TΖOms}fw+L, xz -]|[MMUSkWKWf̠?!7LN;j_i[ond6ۣ~7eG-hݿmIwwޣ畖Ca'oևkgO'yهWwt`x3="Mͣޒ{?4lonBm^2> hKK>0Fn-6?QlM׷e:7n:OEx]K:;YKOQ,C+hM"JS-_W?q3euc7IFz%#=J4c+T&¨cל3vS;=1b!Vҥ-?q?HྀZĔݗ_&kI^&<\PKN|㇒ Aq_$R>MmB/>ӿ㗛rCwt<\e,?./Tb+_)=E_4vReeF}Rvz}Mq~xr"ҢYI2s|vDE]|fI}^Oۧv44nم Z) <-͞~mp#plߡ+x4 ]ZF9O31Osnߧ?m(|kq6N}!?=;MgPп5WA֕YՎ|p}su 6׋G6i$+A{6xMEe͇ɂScw'ݲg*+}1NF۩9tfLf2>n_xJhqv=ČGcr  ̊Fcvl)Re|vfBeU:&pY4ȟp.#21)%08Odܻ"h!`dJdM؜^WF`ȟ~_;`ܭjccmkZu I³U YyJD~R>QezgOn\| 2Ck;$yes)ž`kE(Z]Q4TƬ)p'r̬(G='|:ZΤ[c gO#U`eN~a`.OzsM:8(ڱzN{If].ŤZZ!:Ia*,Oۈ1ќΙ_–d9oV )~csEz(ZYli4U ]ڕe'|ֿM7}У?M&olb ACRNhl[p|:&@|ZB ֯(zyy{y?WP|XܶU=VN?QbԯEX;4_+2eLչ8nϬS1vU!!x, CeMW`n30ER<`D((Q*Q*Q܁h(*!(l rZHd\JJf0xmJ#>=YCf1Z9epVz=ʧ0(u>{ȟO,"JP9A(Yv"E惙,<`D)ZYr+^X`Vyhd,U%/brЫx$EAW\y&'*Oj잨S MaUp7E\jժCkęԪMnoCdAИ9r'FS_G7[C駟D^yw"(iP8-7@S[ dIsiPqk*%zDzU{鲊!:iɎ2.{\|1L9e%ˣH! #\:m#NkC"F\<:cW~]M}g?mL~W A)K |c2 &'/k0deIkH+ߚ7#As`3s$`=0(ҩ mJiF{VKl`%CFѲ{xX#}c󀎖ї)1p~tϟifq[ AIae阗I8 KƈԀ^*$>re5אVB{L=dv{鄞y;k'5hH(D! c\8h 366ZlpGZwâ Pc98}g8YoH_( I 2AZB J*wT,a8DX ڬwc>svSQ82NHQmү翅R /x1)@3-f9+.u6 d\@o[:H3Ї28a{wT==weR;FSԉe OQ`&䶭^bR~+0+K7g'9WGy8cl#FBc_+23^x: ӻ̩p4kEC+ǿ68S|g74 prW{ăe(Ibqe 'oM,n•K4 .gm t6nR^ A.|ww EVkB?vM|i_W.\J/+On/ðMq߈w-h9H+ ]*r-mO]8,3iLG !l~!ole0.}Gbln{*xAZRhihN p=@=xl֫w'1qɻ8cY|s51Ŕ.4lOuESG*w(1S!A!J^1cѓ=/9F(mLR=.U2_Pm [?fq|5¹T@e1pNr)%g$i2L,gy3R$O6azU@h`MWx%uۺ6ص$lq,ηS[BTA![:x7G<1#6:2-sB#4 07m\r{L2u^^my4:``E綪:5n+P#&uuIoW눮mo(8J0Ĺ)յxlE6X{NT :ջ?sjn\e*5MY7Ȝ1-=S5q4^Z$+sH1m`sYSr))a)E | {b:Mp;+^iUef5q6"dRwɿsMx/T O%ˎILZ.d9FTYRE+u4TO3<(ن$V/O19ޖ4IJUwa 2zM *Oj⬟|v!Z&%tMJCِ'nHِ&.(YD>H'gmВurm,U{vdT펭ǏjMUT^"Jr4 -5ܩtҚKZQ 2P)?a4UQc޳VD'1[ϣKN@B.HFjGz\Vӌ]PVBpAw᮳Œ;v[72Ķi >qr:[$D-']R[dK $gu "$_DC1\Lx/) ¨:mGrhZE6rfAkxWLCAjڱ+jʨzK49}M*bi<#MhCSH)&%3OB Y Aj&&OEđ0>p>fY :i*a5qak/j 0 "Vӏ]QWFD#b/vVT`IE 0F'43.HDA3i$rf@JGFSl$DYiQ#g|b)Ȓ樅ӀUFjlЁ"ѫe!:iɎ2.{\|1݀L9e%ˣH! #\:m#NkC"F\oȵ?x9eFAnG?O`: fʣXZ}: ;);Kr\J͊#5 ek,;TȪ!6Fobg \qaC(mۿ-_h* 5[;FG\/qo]w].;\?VvWMίw~VׯWC5 TtN.1rD̖{.pfV: }EPL#ɑks6p]xyw 0K/s6s# M6& -֍I6L /T'8$LCf)+%aVtô)LH,C"1Y++yPyPĤ4>|6Ŕqan42僐K(PӃC q !Kiy!Gbjlh3ܡ^'nW$$hO7:̉kݗxh< LouW*zvgt7ܝr/,pŕI{Yt[.lIkǣD2$d\lZ+N4ax3-m4qno= *q!3: l,` 5I YfFA|li0d @0~rN5D8qiۓ$ m8(ziX̦T*C[Ly'nv>Ai4k[ށx͵- ~~NkׂH;}m?/[EG 00hNgIh]ryfO&w6I:qQrx&iT7=-FËk~m]Ts{7VSa, {ܷQ^_̟v92G~3kH0~&Vq%TJ(UexeYU҇o4niXVhx|k^^]^92_ɮv+?{g$77@qZ[ ^r-ɒLwݞ]xdzbQ@%3/K%Ѣz܌?SOESeS6C^,W.}Pw_UaNjׅ?~gW/ _|?`_=88"!%`;q}C[nŭ;CgZl*qGN7k' !~+~׋3o<h|k ."auBѕKYEHڪ2B_ٿ -B,Fg>h7^4C[`9&9ܙc򺘫D9oƞϑx*BXS+$Z\FW6*Xc\\I7h^l)ϑza=7 &B ViW!(kgaR,j%iDk 5N_ {:_4Xx1>Z;;?1PqZɁ*㶣^M hvRٺ};_*GJE)U֖R2UVeVE#8;dꦃ{cn5>&:A[ꨢۊ&:Uɪ.HMP7ui#BfÆc*a-:X䍨sAS]|T Z)UmC-%g{? v=)a"]~,%4||7\Ν\t?/ldY`?Y7vX'4}vk{?HDom oJJDqzёͣes~ۓ o9*֌Kpv>( ">*cmSqpNU0lغyY'-G3wHogO\Y[f#'t>Y]~= ?]57bddg5A7uG+mjц(ZU&p qऩr::εsx(dE9|e+xX85kQj<:`rY8>].ϛH/Kv|+d)VW83K##I'ac,. TڲAi|oy'3P-}y}EESo!|C6ݻLxuWsq@JLv#9 N K&x8ɵ*V2; ap z}t{'u|X|AG#k] :R{/Zyn]Z9Ȝoyc<#0N[6QPi\Ld5I+o;ll*əgFwUЕq¸Z:gMZ8J XU|=<69!~yrh/6s5h+dSmd TSC7P9hRrḃJu>8`ʛ&H))U-v$`(gTʈYk#R!LCU{,956F>Ob^?mn>J[{nӱj 9im:f1T TSJWS71=Ű+(XK_$XE)Jy"jOQ, Ӹbc0f;[/݋v/rRD8^/ĂHTZJey~dGS6A CB"ZU?`?}IZTǤ,*@Qr-@xWBqan tH{5yˍ%pZױx㛸(:z?~D:,Ƈr{=WӯbqqG@"~[Tޓ)M| |<8}vѡ:٪8y^ p4T#TZ G0p\_ҸBӥBSsj)sOĎ#]*]jze++[YZYwlt]?ibFWe-,-+E6,вe 9 ovܱShR/k4u4ؑjevEy/ Т3`ƙUoB)B W]j u#-<9yQ`a]z(BO$'sh$>_dn4oT\Z:E-Otu\r7~TJOwtF9P0څ* HSǕt&j2q +aՐ HSԜe\W@' '9/<-m~pn E|yz.] 
Hi;jfC?(>8CQZγhlKC Z_6ױNhwo܂ +}1_h :;?X%fӫVϋ9xvp/փaex*Y0qN ( wѝZu棶gՏ2PWm-ګ{^9,~\Ҹ>=XyS'tpO@ _2xW_Ju(aMo";]ȳ/+rtbTg]k׆P#m=ga({^c%MJ3KT:3Kkd*LMT# 6CSCppj Hgq8ۮ;dB']Ws3uSwݾZ7 z6bjumգ01V:][o+GHzJ4-1q.ty4emw/ΪE]xMMCÂVC%}E X.gw=U+ض̋Qc&.ZEE)"t5([yoi=?Q Kz_maF667^Mns0t79gߜW1KS)g˓ vG媇VՔT*Wo.nw1Ҷإ9/AaxʪҪ˦VKM;HX|&@OfirzK ɼ0å KW(kMW$W'+R&+T'qq4RiuE $+ \Z|"2[WsĕNOW$رdpr5Kf 3u\ΙJ)!JW(XtpErIWNWRJ+åMW(JW$WTpEjm+T)x WB/)clT:"+TkI%qe.%\`gYjRՓP%<#v); P HRSd\}2[6=#]֘Ij#f8+ HWzf} GV;]RډMyL6=7`&\\SLNWL q1 =ـBFTpjSXΤȸ!N+JW(tpErә Z+T8d\Wڧ6W$xt~rm2BO~T<#MBRN}ژ;J9'R<#sLp9dpErKWVT<#,R `9z~r]2AT0?T:q5C\99wEOW(x2AR;}+Rt'+ز1U/֎G6#C?v^*JB\AգpIR=-z#>dNKәjsnpcҧ2u\QF_+YJ2~`'P4'd`~CY$w;tS۹~/"׉_Dd@ hIfAqmGdCO霎qIöMD?:S) M&SqM'Y LiNm:"cGV1n+Rq<͸$cRP HHƺ"j"&jRs v&\\2\Zɧ+RY qqQP:\\ Pdl"Wl|pe  =4m/C*"঎+R)!j3 K(YB{3PbBΩI%DJ"&]$&ӋjX:ز!6:z 0y~rkWԎ>CİK]AN-g[Gm/:ϪeB ږO[Wh*kqI8bY9uzBIqɥ #fD/}%-ae*>k|',ž}qXX3/߯tձvm 5a]|/g L5,HT#2SZ;uS UJh9˱&+lU:N\$תTpjqE*˸dp%lzIG XkeGJ &8Gz*.^%۫ʑSkF:OX82cMϹr8֧^3jͤ-Wq9AUԫKy9`z58 x^%L29g{uYmrn$։ g%X:5)6˃JDbp Nh 6%+;vZ~j:PFd\Wi'YB0HgJ Hu*9J1  V :Rb\ U#ϸz\i4KɺXL, Ʌd&VTq5C\f-Oɺ"$+fCqE*Uf#UR+LW$x*"ce*Rqt2Ai9(\tHS,[WԖMRvKc6%׺qGc|^*#+q7Bv'v# FOn_oO`KR QHӨ 1uLs|j;OiZKW(FOЩ:H3W3ĕ̹+!8S6\?{WF] 1>,dEă>caeIh8"iZk%Qnأ;Xduue)t5в?uJsjkvBW ]eyAtDQWs?>8PVBW/FK rBWUNW%kHWLvAt:] 1.V?P\}tѕ'v?BW@ˬOHWuaAtdS f?bpztbXR1+]ЕO~@eCWC^)z`㟻< Uϼ8֮CNl;ՓxB7jegZu`xr7ۭ5ӝۧ~fR`ۂ)|$QY%yV1#봓'G-QG s,S|rî80/fia~)K @?DYZxK Ʊ `&Z ] ^-NJㄮ^ ]Yw + 7K6xgN9͒CW@\7<7BW/Xī |X|]Q:Yqb0nq'4m{=:m7?ߛ7oA1~;hv>5"kg{e_ut @o#]\\I}nD~ w-w.ַ@ltLmv8R*_wJשo>N<^o88_[Ք>|ŷ&t_O}n?v3@ܫ]kVBHm?͇ Y;:RDž b^H>c>{?[&iiQ>o?5L`~A2CCCr%O~Wn ݶum^_zր=^۷ɰ;˱%C5z.ؚXWRN妳u8.$N qmqdY 7o_ݴ6mok4uu>Op)9ӓ.JE]B(ycbkQvuRlIs4tؙ;}49eҡYUc]SV )jJ[X5uK?0ٚE#YowBC{֕Zɷ*BE:V,ߎI]efj͉!f-Š D[K!BMP![%k`DMԴA3j]^u9E"gqhѵh@.W޽ޙK:Yiwh0FGxt! A; < 4QEz"iK7mT7YRѦEK{qڃcFgd̉]șbq|}!s9 AYUW-wrZGs!+F[u΁z9Ku}6:9DkQkNҩYJE.)$qu$~`ۄE)%yNVCJ 6ZЗ2l-7WrQUbKT)>$rM NԭhcfC)$Xg dGhOM6duGގ`Fڨ/3ޅ*)ES |FnQx5Wn< uԡmӊ#5 (%ʭ2Tb]vut%N@2hEgK'ǚژ[]JP\ZlhLV0Փk.keB`T" œ`8:'XlXkY ϘC*D@ %ٻV(V*"dJ)O-` \:WlCD&0VjIQ221LhHpu 8t, |FEtf(MJt*T[!z@g3 d2ymB AvEsze] R uWh%W&q2FNt( `\m""*NuꎈQEgJ7ukƃ A'b΂Gܬ ݄Bl jK#B`@1'T j;(y3XGfk!\(VZ h2ݙY)(J892r$XT5O,)Rl eBRFAmu2R*LRۦ{VQR@}Jui^brT4DRH LJDFࠄfN Y"1Ш۽B6ф=r+ZQCk>84iZȠWaPn3RUNJYdFŘ@Qb i /rC<ء3º\ݴX\ğj*ՂY mCka&a-A7 JrA#P`ZDbޱjdZ8'hVY{نIʣj$k4YICx(mƝjy\zrDm i orAVe7 WG `j\*М\1ֹG<p+. X2]ioϯ.ہsݨLm`0uƝMN,4zR6\`- ߝTɌ4QG[S1Zk Q^󤑲zhPAiEBoG=̈=Xuk80)QC^":$b樇 tyB1C[u9vI,WRcLw[zP댂0dJ 3 A)w˨zƮ0XB|-w`E^1H"NSm:X&@57Yw`Yy/aUèC8Gmܬ{r|&uf* E3Hw*q'ڸ':>y'P1.D'k~8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@['(\8k39Z(8^'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N q:gF/ dZN !%:1NH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 r@O- [-@b@@@Eyq@'r,N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'q}˱tz\PhwiݯM NY₌KLV-Ƹ42-Ÿ4:uPb\z ƥ/+o_] u.c1{1>KekNҳzt>x n1t5.J("*j-ȋU@KjtBWzJȖ|f:yfuuZz&uu;72: GЕz{ wӝ߃8y޸ =[ΕU] oW:J tV?O?n6C+z Y[RVt4P*ZgJxH_i  Lud dcAĆQErؤeeIHGKb%QU{_~rcB iLn M'T6ZANJ[~4Ml]`Uc '[]Z%ũ! 
a + luE)7Ң)tE=u| eKW/7ɺbTRFCW1] P ZztYZ \5'vpBM+@+=uK+ҮV  ׺*Ս+@+1>uJ( kҕd# JesB M+@+ɇJ܎ DRc֤fU+CWȐP +͹z٦UDc jJh; -]aJ&C{⹭2#eyj?ĜA]n b:gǝɭw/7Ƃ|6Egy;^d6w@@M 4e6£_]܃h\.~p9$pk^UF| .`F#ɣ$KD \aYl奼j\qXt̟DVMnEQDvӬW{ɱ6$Qs-9˝RG5m:\2cY̙C:jhy'#Vu GI#s|J/l {^(#|$:(OR g`k.PHx3jg>NLj9+j$~UWpV7R%Z͡@,}|7Fͬ<S?FsЗ pW](si%кOU,6Tr}Dh|9ZoCaOz#&[%n=J$zeVTz =iygyf_ТTՅmz 7O.ᗔ$^ ̬J{*noѰ*l:_ͧ[uv3 b-#{If~~Q,d"]^*D{w(}yԳ#пVQ6v}|/[  ?g%2dx`7p󤴯M2^a\ l6)z.&/ihs2klX-Ljݓe9ҽKYԺR%(bUeߠÇ%>7ٲ~*_)o?a#?@Lo-V?r+ sa"P`Y,8T8© o8$ ;֚vLӱ +e5n>nՄc|.DDrMf * )5HijK ^Ȍ ^`A Ɯ$U73afV kC̅GTGA%y(wXF|rҨE X*(.$34#(ljg,CpR5 \r8 .#ȁS15ampҩ Dˆǡ(kfD2bˈI|:-GtN8e,/1{18L< qQt8 #@HyDR.Bb%#1ց*9'Ӂ'MFb683e?@S["_():kyyQ} 8FֈA WVw$ p6I)x 8SڤP>_GiW,\}F O 6ܚz=W1ًR-!i$&i#5#~4Wzr15{⠎ԲI$*gȫRs8 pQk; M'7 {ia$g`) 0#Z['Q ^5 c qڐz Kho~=.Sv! Y/.-E`:uȱZSSDD&+LjnDpqmO9ƵkEa2%wP'8+МHP@+B6*| )HT:Pu+6!`BLB1jveZ5n8z|SXkccŦ'4B#k_B2p9t)P|.l[~x,nFNnN3 w:u4ˍ:U GlZ'DPLW6Hij5:!xMB(VV?tŐÅ E#V=[ڎTxGDJNcRɜRafXTqi/VAyyPOܾX YK#x8l"RDX(>Iˈ8Ub YzME !Nnoh܊q5r?U״")rZG6G,XSi ЭSH)D},(48T"Jaa &-gs"TmC#cy|hh\YΓ7d5̲՟UX8m>L!`vpwVJ(6Re1뛑<"윞APA(ւ?8/UBK;ȗ4.RuG ŏSD:9c"\U^tP{˟J@Srm)Xv\:$U W)Wp}]5IP|ŗUml|,pMLI"e^~[WPם]Lra8T1vNOAMwhSuxYi}D?1zmF&d7ʴx:O# QZf>/.]7ьz08nׂ#h+ނ*wF*ْ6ژsؖ ٚ N٘ͺ|a4. j0bǣղ<=ku;WuYmn)+FL*-`XHʘ48f6q5Hy􏻉 U F &}?}}ǟߥϏoy+&yo,tףT_Gֲ!e@O;d:5zQ;PEޒxMJgvV]n@~~3? v߹Z.hӸu  \G>W6$BHDmTkFvEb#r'MzlYoLN?iM#ǝ`}$lqiQ31 Q8ٻFrW U p9@٧`u< Sll޵%*6ɏ_R+ɅdCHQ4H66!iwϟC28a)R<_ēCJ.Sb?8[ЀR"k-Dٔ՗N;Y|5d$-@ə@l_bۛNuK  >CF/xBcVk.eC{(ǃƫLTmjB Y v1 r9vyd*^'ܔIRV-M!xDA!Y'\`Ȇ$sI OXNN0R|">}yX]${QgU/ha{*8gќ:im?OUs][AqdGGbGJ(?AY޵RVˏÕE&Ę26lb v@{HB&$dGyǒv1&И1Q`Jgg&"@KNsYU o)Jz*B M8km,2?h#ȞaogAa+ᬩqD^߿Yň-|J>Fy$i4LE]cr$q?̒&XQHJ뜣#31v9n hTIga"xoZo0"ҲBSf_ޣ5a֪H+lrNC>Dd)9SK_w {Ao{陗9lJcvM)yխrh1͕Dhu׵׏vX*[nn n_[7WwϏs:[]51al; ׸ڼUIw%ؠ煖CMSǼɚߨ8u.E}^=^d< O9w;osHfʄ`ϏRFH>:|07\RcuFGL}8R_l1e- H1OdPLdHjUM*JJqA,BB >Z$"dˉ D ::7 &%lPPdaI+9IE_WW@shW1cbR[-_ȫM'| j,b҆1znjPO)ȸSS|J\fY6Z疔2w$hS BKH3+f1ŝc˞a㸬:fᕱ!->ր _:.Hkɜ|Q NdvH)WCq{J4{2@S ,BZVŬo>5MʸǮm|={U6{m݅=o ́1paor#>ns4υiXB|Z@yRsM+md!8ǟ}|/۹eqzu$Կ\ vVLdL ӵ@n'jMd1 nQ:R#Q*T9bqS)r C/rRcWXAd?[r_ +AАwq>FKȌjߏ=>uZ`Q)\^vHG\\\x؎C]~.8ҿ?Α^@Α1E(45$& \)H$KCb6?]2gP0sD"2oր-agNĐ Q#HF碝~3JaRל$ B ؐ/Cfғ,bz_7q6_>6Wח߾Ґi;G8Ky3^W7vR>*@ '~{?0wѿ_/]DtE17%X3 rIFd% /Rfc3"^n  d1Jyt46RN&q&:f 5PZ~b!\D*cJdr R5_\2͉R+/y߱pTxNOכ8$/f6"y@n../`RfS,*Nj`F V`rQ[2z3a|BC ^~9鷵Tl=5\ rz`W}=ǛI[䟾L(݉\]in |hV:b#e<ȣկI4ڿ8nWq^{'m[9nԖzG낻ewLdMxiEɫzO;=T xS/E&?a[N~z=>_ܕu39_ J:o~ZNng:Ͽ;|MT6EKRjS{}h_{糛1?>X#74ĠmesNmb ~A8hP}%D`KŰ,F\-E8Iy{`~:MB%A~#\%6Zm &6;] HTɻȓJ9cڕ3MM[\]ͩx1^ӧt}[s LM\3 >$0i}IWWW"kɦ0+&VVu-"ȑ.z v%"[==٭dCWUgEdR`tL`Y 2hLCɘb-Dhue'VFcw\ɖaVF ī4b ${:esIM;zp+9T08W FeێE`Gm\ogsQhH [: >?\~TCp>Ǣ>9sBɓVT*VD+ldIMS{p{t\aXmCcPFNQgEgu6y0C\^F%( bLL@`̀'WHT9`L-XxHly֣u|nV{r}Oֻfpg·`YB eJ1*(F:3"8Hڤ7iQE0n^6ol{i_7zeb]j0XoybC|ߎc81U`'cܵ9ՠnRJ7c^9٢V6Nt2*6C*b`WXΞ\{Q=U׺S+\w*WtN+BH:+,%WJT>\Uq>biUW?\Bl2[>Dž lQ:qLvbiTC2W6yzcu#*tz䧅vS6 >\? 
7?\wv&ONO2L@[k"X{ᭇ w|s~q[d]퓙p;ׄ?O[^%mi^Ռ5F)M"57W;'x@կv3lLfߧ y3}<:J뮰}nq(!4Ef8J_vXfQ#:],`4^?ˋ</Kk D%r͇Qe+Zp4n>vUd&pMPT2Dd, bY +AАwq>\$25b &w'Iis}.kqm67YBHStDC]c &!v9)ɠt'dY`ia ݲPŕT, UZ C,zP1eP+X1Vq;vWH({Bp*֧WUZGC+R+fC)#4\`dઊTrpUm[lso(3{Mw63wM{?X%U=)Bɔ%lCp99wp s49!'W\ Nf*H{^1AJN;zp%E蔶A`LN q Ҫ߻);v<*$G*UWS+ %C\=CRB3yJ zkq> ꣇+Rೄ+Mt-V`:8 g; 3נ &1T3aCv .f.Kq2RmKy2i߮?A.2" =7IH H2}QT~_r!Tf|*0ɠfk/,E/ݽkl!q ұX@wK(=v/t^|G(KyV%%1"\Aq„2VBIN<8&z_bČ&_jNC_A %ŋ^m9k{@ףOY"Ě6{JOT YE@yoɼ֒F7aHܤw&6^d/(O#}1*/+-v7-)"@`|d [ѩp : %Abϐ\5/\`c{I}l]к_Λ+1c[ kt;#:DlñӵsTsLͽ'G2&ai{ 6TM޶v`8{ 7_)mxӳPLm04&q|w,z`mĚhG1ۇT CUbm]/h\auj<;_=1*23FdRkETHhr}pLFijMNZ[{Z|V-v]{Mkc" ( -H=eVn؀صf DW[ÖL-Mꌓ'l8hyñ OL+gEgC[`OmG@>Ł>>I^&Yd;M:gViFUVۅcF+XMucO&U(,L-cJiy[|liS6K۔"hw"bSdw{>ETa\*lkF}͐ަ3d]>C[EtK_൏NM3a3MKg-{~=X7M7%pl'>t©UA{IR"-|pK^,:Tq "ruV2uvcKln*sw&>]\qv/E=&9te0,u׽F޹}GVNf]~/~zRjA`u:^\ .==~j_ W# [[ n'"P^\[I)mܓ-tpKU284E"A򂁸G8ф:O߹)|4gth4N$*LRTC4qһk0WHڽHNh +q2 ,7AZ} R2-7pbq}Aqؓ ]6ʍrFDcΗn0[_~}̌QO;n |]d W;-D %9/mCu^=#_PcrEq ?\BVZYyN{z^9tU1](bi$GZX`pqsD\_ĜW۵ [71O9qzz( 4sL(RmC9̥9"g98n 8Z)ese'RHDsrWZk,N0.eQpV\(4t2)Zu(x43,OwGCXQFR $:U,L/lچ1kpӃ :^OWv_znzrq);D =~Ɖ=ڑܸxf,Bɛp J^,0bd`H']FÙ#כ$w@Mo{s!B݋52"y ]N C_h9߁K0*BEAQOmCE#}Q\ыy!hY\o45Z[:&.B ϘԃTJX^|`kaF} ҫ߲H9gBBfT`D$5w z "OES d$`g (#g`:%P! `TʽLVI,:Ǥϑ.Lp8%bra\xg) Z&A&}\F~f7 @mьbn+X^WOMd wyqje $TV#✵\:f>7`c(fQK",ϽGmeVKS{^&z/WzS'jhڬᷝ}s jҺ}ڞ6gLjg<ڞ[0K!)SR12©0+ig,g9^/dt(W[LB=|O/a\O(8 ~hDgf_VuDhkn+>>hߴe2cK"{ʅ2(&S8e'4sπ4c>P*;&5-I3S2MNԔZ7#DX9$,Rn&fv\6.JDnTy6?l=-wgPes`<ЕҢ ^*Bιzfd)%ʌxdz:Y =|qȓL.scH*dN9QRc!Tij5CV'ucA@Y F UȐ812QI\gVyK cz&Y7[nүwŊ9۔A$Vmˮm BȖR &pUap#-F~0.tMfr,]f+Lyהe !n2\En B!JoeG:43}q.pr=3 0~pO {1FbHY%Gٯ?Te6Sl^/#ltGVt<0CJ!)g|<Hv1N B_Xqr -(TF-5w)݋2~`"uW,/{= >1f"zx4/ReKnJD'3/Wow= ?s%/˪?g[+uQ'I3 }z6 jbO!-_ABWY /|)nrEʤ׀Şe:Ԭ5UgڡذP2kLƓME~@&k:*Y R6Te\[ߢTU*)=0k^43ݫ*֝vUj}nfםzLI2/<sZKy2FAYcQ)9BQ\<J,Ϥ>v\Z8/CH}yB-}&kS32!o`MzX&Aefk[#AaGXcN8Q*'gL,eY>qgzAmK~+Mm[Ӎ;[ۨv=A<l_@)dE^xX54E-rmhwGn3BXFn+#J!D~R}hM##اAv۸Z^/E.1ω`K @?̈́"ŀbQLEP$̅.`sH -wPbFZ8)ňg`.ڍ!gJYōʰ+E3B3:s02T0#4.[? 7ϨZ=w71.x3N]oƧc3iI]Sb.1?ۂ6W!r*Gi:ۂLuG=ãɛwlVNMrf˘ d1uc+W#w=;ɲ\:؍c\+cft`= p&9S [f ˆ\x75@Igxt1e˹Xve|:I`0#q%Z fx!SNQ[bĢ>V4iɺE8bu_d~NlmNg63n0[m +v(.gW>ڀmreg>t ~! q^:J%rk ;Mw6إTr:a]PH83{iǜ9@*ݴ4w+w#\07?_ċ@'QX\) fu>/ps^}YJ.>"qU Uɭ)n`kا»>?{\3Oop#*]cl8( V .i)p+5fJ(R?@2k3%4N*7{F/=Xlvv;/;3ؗؒr&OuE+mK;@["٬UdW8j@BXMg #?;Gt6!lxi@Q g(;]bN&RtTJX.DiQ+T~6gE@rNA}, Hlh/;Nc4okv&Ύg!v8F$:dA69)Y椴(^ekATITk~XM_+ⵙILP\̺fˢmՑJR+"t A&v]Q붊;*ZXr:'䪍nouP2#X}}yVxN޴1La8ZV6n2kNy^?NQy,8T@@Y #M6[+5/J#` '?_ ֫D"![R*!mdɱT\D(os2`jV0HuU:fƾX;Bci-n>Wx!b 8g_Zvo.&zC/._./ˆCFE'tK>N䄎})Tlh;#fV_`OLKnk#aQ< I AFQJlaߎ":FَagR$TNa U) Օ*uPcGc3qvR\E$OBS%Ɔukm痷[{hט] Zz`kӢ̋΢ɝuoE4 `N DPZ/վcD n[d0/\YmߌdhRdi%iUU!ttd Bfa)U?Np=U U(Ouo]RX2ŝ.?|4w&XZ[t˱]ƣ>ߴx-"cdMOʸ=)xO뺑kшkY,?Z0|4ni<^Wv>Гe͆+uW)6:uyVN{HX:֦kqTxg0O7wMNqxu G~x/?~??~+ΟynrkDEW W ]ۮ]3P +Ӭufq ~? 
'Fzв\5r=5y_aY/ Κh},%"BbA{ROmI'PO9#;qē|$x#G|dSbT.S"b]I.$\r%[15{Kқ skZcp‘'U06ۇ\V}ਠE6F8'ʶ, g:UĞLq@'2=9ف{]x1 (vA)뺋o(Ti'j |ҊtV*^պ{WE|u YH W^yk%n$C )~X#MG"(TH䄫uϡ`VI)WX$XR|5OWP SN7ZٽK7/`2#Wxɘ;حԮi~q쭣CYG=BNPfWS)Y%y2YJ)l_(DDM;s>0 -}{'5j|:wWTVX {s"QJ#IcLLzoQQ5bϔ᱗,;ZF2d0F3u&*DDTJ#[·Lܛ Z"zL>vGrT?6Bw]Ә|zNwRcI dH`-CN(b6`#SZ7&$T]Lf'8ǽeTGOޱh EI%{\J zVE4_oh#-m47>7 eb U5dA6ҒF67ޓ|OrCWʼV|*4x?/uhv1Pp*9i d` pH贯DKZa4)8Rq$9|3]\ O&wak3-JYo|eY2٪' ]w%UfdWa=RB$m ABU|BV Ay]ͬH\1eR=<"&()`=4AzM9M)[ph$%[Ε# DJ)#}"Q (2ʸ \G,IYxm)Dc; 2) I{vsE'>8AKJ^+ak1vQȘȗT*H,BZWa6r^:vD:vd=tUG4w~|3^Mmeי-L8\ O>ϾGugӫr9gK~*-&@b<YWq"rG/y4?fl0mu~n~}T#OΖډL_.:90j8-i+蝃ՍKioRƤRP bo?]b0ʺ"D 4ΧH/)JoĎzt"-ufU[YK:TrGWQJG)"A_,XEvؙ8ۣ8; ./>?z i=uG| X1\0yLbR9LM QHژS7/{jÞX FFOٲELHb1  j6ޣԩyis9]$ .`Hч@ -%/;Nc4c,5\߶)kqG& G ~l^V5UǓ[{ջwhpkx{Ml[c6<[f4=[woz|uӶɣvHgvrhqۈWChsӵO49c1o,[_np&[]ܳ[ͣ6w6|g37ixy{Z^s|ۛ_|]xTv_nN3Œij=Ԝ?]ov^dyK7#6z%%L,[7R*0ɓ9⺓9fiT~Z]5V  ;Zq*pe*)>vRk+ҡ$r}PyJ$w28|Dlwmm$IW?m/ț~Xޗi4*Md[=")QxH ȢXbTUƉq1 d\#Go[:H3Л28aS(l m!qS- m!)q%W-oǶp);%*L'W$T;\rKuWoښ+ؼz5p*pEZWEJ!zzpJ yJpEZ x2 EgWEJ#{zp%%@R&O?o SDw5JVƞ="AI1 :63ǃdyByFOTHC)#f/42UIhBQ"\ `ւKRp%wy$ZeTM$0q6FJA?)45ԔkXgjx __/MdZY|Uyސ&X&<1 uHٕ}h`R23>Ȝ ]]ojrO![ `R^={E#o^ǿQ]Duf3l`́gcn>="qc⹈ TI@g98L\' V)+˅ʎ9rB9qό) 0>&EV WD3̎c=w!Mg*~Ty@qk5O? ω% C{K=ܼ?:TOÅY!`hD4K*q$ՖOA2AVg-S)qk $2av.%e iUzx38G%&.w1({lvЈ]TKTT,eOxBPRXA:e?N`3259ɼ\Y5$SCͱ}S|i]$K\-l]H͏偮Fl#xoQR8=.v5ooګ1.~?.bLsMX#>]q}K`?pWB],R?umAQ*jl9)ŰibM _t_؝sŊ\JЯ 27 ]zJKc<3Q8=ʒl)~5ʢt{d7ۋUa <{|;,?e΀3KZY,]ovscRJ΄Hd]b,Y`F.&A ED;}޲qqo٘l8 |ߕ-]\=VM/]vg4G'q;uc#)lmWRI?~gR16;'@Wn hgm* 3=I xf1Z&J.L5qvJǴ!g7wnOY"JP9QdiLcD)4\GC˱ncD``9ʃ" 6$YyP^xÓe@&TVjlWt1:-MRUJͱU'檉#U_n:(YX}|Мցp[ltg^)TE$mzLl N\F)C4coeL$}=)<Mt TI#2Tmd&vdUaa/X=m%Œ vˀKO޴jԮ&G×OqyaĖ> 2zˉEԖ(,RŠYH@8 Pu #2m(Ξ 6AW5QgAmBG,hSdW\cAjc_VQ[3M3is,}@>jC, -r!:b*4:EIQ 팛.xC&fHišXPi\J958؀gĺbjr>j 9ZrD+jxG,"04dRηYHk6m_:\CMSyƻz`cwFwMF;gQ̗fJH=,:-kIkǣ.{ DmdW:q[+Mfj2+)T \ĵ"Ԇk :S(5 $$%d"۫pOnBiumb ΘncN"ȃ℩d1#RhJ<~ooơ/evQ$2Aӕ ZgesVDf.U:YQ&}*+^*^!9*q\Q1XK|&)DNR' \ݷ 4]P2W۩ ?pKWn'8T{v)}d=}.iy_~k4Xd^1{Hig|t{{v>[igs;>~ɽL, ]=]-F,FIs*Ÿ{Z>G)]~L`g;\185]{luA.uX9fS:(~4ұM(MZ)b;c޿6jW5iO:j=9m?8:x˿?{>OOvr҉'a b mO?"@-x'?=kTߤkOkt=: zjcGn@R~˷]wJb}Q.Zf樞@*W t&w+D6;q5UOz&UEOb0#%+4BM~g|4~ޠK O؎ NԱij:b /zo46 iO ᆴxCW*MFd cdh٨KR1 Ex ;Yy[>hm\TA$[)U2ˊI%.{dI9]D`<뜬Nf/q 5SѠuV"cZ{̜g#FĠ>cӰTi#Y,CmcRIsNX& G<4ÔQ m}".Ҩ @,`4ld4}5R6[Aw\JFL74H$YVkD+EI*2h+{6ȡSۃP\isL;s7 xwJl:s8EKCHQ'nH6-E};9ȕIG[yY>8YO*TdyFs D'v܈׬YĽsTĸ#'~ HQ1@ M?Z!MRs_ LbV~fԑ|A5i~N얛x{\%GKM\cI;*|i8tF3%ѥ~8׍ #JI&Tz<ժGF<8@flGWҼ樀t!)G֌ېHOQty8 bv[EN4 *@x.1 )K,):4He670t.3۰9ko MCR#ox8zkf%t BZ_6TȍAڂ &S`!DgvѦF\$Y>G(U&Y,Z-Ipdp,} 0'@*h鈩r`M7FPz||{鳣_V3bbܮh-=YBл*:>Y?e7^6Șugdi+4\T nJZKr7 p:}!cǬjkmwxܫ1;{1+u_Oڟozπsz󲙉}ov,bqG]'`F6x/t,)Mh{ ~lD+1[ɼY*WF#,*AGo&MŒȷݛRk4gքٓ #l57 [yC}Q'^YbbP'tj>qWGosAq&ȳ $?"ťY o}+/ˁ>ꢰBܾ8 H#EH7փ!rcJL A+Qo9k_e7j{K&x/4J^`%NegY5t,'[` -48Ozє "b@H3-,b !>28H >Z*CbNcvyQk1G,mMv]Vj9ܘ5{2;DHɖrx-#1W ~2iE'FX~o=p5"\ j݁{r,0vYf #$1*A@jxs}DB%UeQePVQ8Hts"Pஂ|}}Ʋe9e.>^."jr:3|kSTH:{|%S+ӹmZw˖~6.zB1Xq_+_}@ˆOյ醚SF H1zbuQڪT{RR_a%ZͬR Օu1WYr <|!z4U͏ղZ-Q-kkF_^5ض֞4-#VkZGUYV,%Są&RpJpP"T"TQL5+0т9 k_4uF1" AōBK5Nx%]fYr̊+CoR).T.H먗/C6PA=/Q! 
F&h81r% ]ݑ/T$s'؋?lp#MѪR1pDۈ`-Tl+Tl+j} ڊmbFnvP1 hqi#`e<\N)X05BІ mh:1sRRƐ YDz x W96j \JZe =rRyo9@"\Rp;+U^/.9k Ӯ_aoxNYVsup1!ø?V>_鑾0]UGc6>>fiz| tzXItJ&r%i~Hutuݶ7b.A+zyyFv\v٬7+z]5Zxcce\ :&OWƼΞ'm{=# )fp;7yӭ9u7;)$0{9݉);9%gH,[J܏++8"W"\+ȵL+Pk䡻-2+aͥ'V3W✌'J?u*\ ̉Z4FHbv`lJWF:rM?i6FBKP:l]u#0bX|r8 cm$H"o:56 *L>`dlWv]0d9ש/rZ-.E,:,`Enb7rTWC 0[y-->&ZjLlNr hLl"W <9@Ml^'EkbB[X.:"qEQs45F"jCW WP\IG$`шB>Pk4+8,;qUGsZ{Pm+^B͝TNZu͠lKzXƝbJ`.Go.~cf(K%^Bb*RTTV X&n;bke|U3~hwC})~A2N>]go:sU80Le 'J9K*,sYiłB1Х X*篽c=]6zw=\\7th_o޾yH`Nپy:xF8X>_>TU46[]&,ȥVh$xdL9Oc"sPǂF&Epfo،'{"ZIU,7wZAy(jQ 7yrcϹ#ؠ3N+O'xl> 2:l—Xr(T&W`[gswf(܇s1庤()ЁStѱ&t\ANaME#slr}^NnɚXLݥ/#{ yumw~\O~\_ #ܢrKj[mkTs8FϠ1KԇKeu Ūς1#ZKuaNhKr3C1YC) l rAY-ZjKDu'2Ȓ{/KHĔ*1 C2 chQ +՞klCR%x)&' IJ"^EoHȄ1rgtV̨ꆬts$p \HZ{}rWy Jkw݁c<6qt:\*莗 <!@!gI-G.<L2F(:AI2@rYX&ǘgN#FGA' L֋RĜ5qeRMKy\264& Bհ,VTށ~.$::Pew0|[T[A[tQ*cYҒ-]mo9+B,ض΢0 ng0b"cm#IWlXv$Y)[tıE٬E*j-Φ(/'&O+Υ\ dV4 yL (jS Fm&nG9 r8rfBW.QCVǮZTڦݴUFTeTPȁPBҤ9戞Uސddྪ>Ժ!2$lċY5X4" #(Y$Ra<[R?F>E#V[jD[Y#N#vqӊwYIDoP*'5'#C i,G7$FtDq㤉"l,hE D$0HLZ(+*pֈ@zqL5Cj aPs_!U8jB8݂bVYj^p>񔧪#tY/%Ճn)ͱCGτJe3i[tYUOgJhK 1&HZ&@OHx[eoitX#G Ha0sD8 # >qs^2輨&- Q tV)KX}hKɤMJ$2rx gK(\o 8݁#W |aFfsjbMyObFut" h IM)L=H5b|7IU%8:3&Lr稷$/9hhZEFR$:]ͺ]äHO{ ]$Kۑo<"-߆pDb'BAdk1LK%/XLV搮sɷЦ]YlrEMW;eBɗ@a$ !$2*/\&xo ЖdC>h+Η:}ċqɕo۞[45IXe\y)X^}Dd.`JA kq88JmnUZXY(k$sRFFNJ8:iT%~n?f-nl2 ]9(ΚaȜ%M] IO\e XS'Hui4&pK>DoV 5B\:H)M^_!z#ϛ46_|ƥ:@DdK&Kqx.xB (]fvm Jfv{M)T"izSM;\yәgiYAi0)ܼ_߆<Ч:u|nzWU"k{=mxryu!ŭ.h4YxQ%ztz ʳ eh^2pl_C9;ޞ]4Ȗw>c砵VJxH!et%#RRVc5#=aVzuHq7 ȡHzz_AE/D%J29ǔJ6gy0"}tP2T[g]Ov?]xVmvPfSBRqZ&tP1#,o },ڗ^WN铷~:T食ptKc>DE) 8 ? ְ2U&χm@2r\,%8}Q3[FklJ;L Pj,C4щQ2e]f24;891 fZzsu(CYˮ]~q1zaky3!-6ՑYR .y8P#0R5!F$H%bx-LS+T&R?G,答bVs02mgƙ >'d5ĺ3\H}S/YP*t,,BWXc̢OF:L|_$?唨Q=0f Y*' rQRtv+AЕ 8Vhh{{..uc-!J#XcT)/P^ڔ|JFg peCp\Eff.s) `lKVΖh@dt FfxY0Y U[[[4pyZf޽yF<6f Нlnۛn|̹EKwn^W7c>6k~8xz$N@ó eGNSo;[,o6mu<%? t)@fL]fn7&nN -Yk>3 UAEY+#6`jLH!ϔ3Nvʿ_f.gꚘrx2^^ec1k\m %v41 )9 =ˉ,;Ȭ"<o.(#r1%yb( ]-p&HE?hFFϿ`c€T9ʣ$(dˆv rER ],z_BNRGR<" ,Jrz'.3‡J I1>:S z?+hfbb^[C")LP}v" )Kr;աfL=5{DLi ^88F1E $|K(I7;; i.3ܷUhkpe13>9b`joqwsyܿ⋲U}VhWљFV(B| oψyw;gܥGd}T0A֔rIF(g>̄N(Tk ! 0B&m:*.!90G͘V4g/(+jݩyz3AΤ/( X b?{WƱJ#OɝTB3Nr'@r'qdUbL 9>MdSԢH Xէk;UgI2u*A璴%)ut5SD1̟LS[YC7^o5l!rFBB,[JL.0">}=Fg#ȡu6r0`o 8b.f$ Sd,J2-hn970 GV Kp3"اBL.DpCLDK9 >E7 zYV8l VVnCMaBU7ֻFg^ui6*P9%{9AC4DSXp EXN$x ٚ}_-i^MqTty/$Cm¬ؕDmL1\ijo`/gJGo[>DpaZCr5c:gS/0aNFy"rv.#r6O Wmpc์E(P@SfX@baP-@1^&ʎ #M 13)V2"R 8P b#2@Pu6qvZrPqT,JDèl~t;v;Bm7|褌;n4'k.9w=SRypfdJ@Ӟ3i$ "l [4;T@Zj ? "< "\ck%F@VY*U`kDK- c%\dlղ fWǘH]z;&wQҿJm(qjRpLL4TR0ޭ0f40AdͬmSLbR9+8$2tP{5_Y݅/i֫e@ZǁnRq/ Zx=\p丒Sb b"LvI訍5KPp._P0p2=p05=g)I}v]Vs"l'!KAʗ6h3{~2BFeq0[Pך+-b_eddfz oXMK.|ebF}y+qmeѸxc5j"`U&5?H-W6 7R ǖ4d4g}|OLKs7ok?A . gi#p ^{ ZX3֜,z[-йP/t͠~/yKL"CgPT2kr]{.nG@\;Gp3X3?# Fl 0*Һ qdo+>:tf}?A^;\ah]|LSY{2͛ g%i.9<+yD+<~ ӚTdZq*n{`f¶pZ,]f?FӅs귔[,u< 9W6_c՜- EH%VpKn&'S)M݌f0v],v dY>N./@1W-%l .vQ>ԯ Ź`Ecx8)J Jn'Ah֪;U=w*GOk0? Y Hi{<"wK<]i_ޖ'ⶔ;˺=EVKלKt6[F- >HeYŠ)'9A-kL52R1O;Uةw|i6 Qdžu! 
eoȩ.y}YxJ6V۳xltRHSK1GybR)g.lr>::2CP AkSN;o(gLHfXD1l 's8p &y"qIЁ5W U^l?jۡmlТO0}XNo.?ezyc1b$ۃQR$Mg !Cgf,3?g3^ꈿy$NF.Q~n" ~ġ&+}A.;$$E>JP܁ňbT1"-@sC jIM)FrSl+7-EĖPƽN% 3B ^X3ŭM] \~-&:OFi~m[Ї&׼w;hAZvҺJ`ۖILW&#rmMRk Kw9:"Lu"(ga٧!HdK#"j'" DǁŨ9& tL0L)8%ˆ(8 P҄0zFGDhq .VnH9)w aP` q$(u4)"D-h\Ҕr?t2&:VQ.$JϢj' JL7ҩ~iǞMciro|Ǯg+^k,煫Uϔr)5Z6g8{o]hKpxrqj9P mw.ɂINX -DB/{a К@Cc;>S2 Wo?1(`: 4&!~*0a:IӄP ,%?Jj}*p*5p 1=!"'WI\r2*Iˏ^ NRʆ]FbKN%D\ \c+R^#\q*+it2prNϮ 7p z,ۿJN w> XM ~ΑbWY=ۻ$z?_*{Nqa:#2,M6A=|c!kR i3SA"a6sEUCkco~?}[PǼcF(+gV߬W[lUc]w fR=9]ZerY =(O႓)uȉR4gKrY n=qc~}f8"V@,/BΕe.źqL(i]HUEH%0OEdn%_hRtL->{dS04M3q}W%ds9}f˧Tp3R + #zERhH Av5DdR^fk ;;n4]D0s,x ≠ tIgUwzn4 M7e;Z#0xZ槃wa<av],{he i6/l'S*"Ȳvr}]'Z))wVBִE;jG%w mRet'.mP:wnmY!/6,Imeǒx[d˔ݞƷn5dUȺܿ0Ån+r{x>;pzn[zv>nv<={ӕ\\4i_p&\n]{atOz.XֆBL`>퉿Њ/ld٩AK'jv 9|KC_vW&!#M19E1Ť s)xf ,"4 lDîLYϼ"tV0if8xO&w#@}tvAwc1\V]Vz ^v?i~i8pU=z5dOƳq;77gdV#y/YDGܲ(2M0 ڠ8NNJPZ}H -wB1&cΐ3sI{)<{Nt6J-%g:hJ^"h#d`Й̃:yJ@x#؅\Vc[x.VPm}nbl^Sxuٶ-z!}r;h `6ÀW_ rʽT1xs1SlrTlE=WHbCv]CNradd7Uo@v;ݫ/Ȉ Qe)F%8f`ǁQ<oApLɐ/zhrRT]v ־)sNaWKɴD˭T$Qל )^X Qxg\jp-PO~eDj7MQ\e;o+qW@p5ig\99,4R9*횓ٜT :#_*7*2\P_u (!HQzrN'it̬5Х'NjR5 LI)8JH:39¿%+sDO~)C(덁)OWZFק8龲l^ӚuN9~}/ۥN{Zt|FtPk_Ƌq k B֟G\wt\o\㽳Ƒk@ QQ`9lUPF7T#tت7S#t[I5"K/jD~5"RNggW|?i3kL\Ǜd"48h mxYG50ʞ^^ hG%8t0 0]qb[E.GaV̩~#0K ) ,%g3'^}It06SZwyd@fϪ|= F|Ȓ 6(0)ͲgR AALPXM@˩n[$cs&pC]D]EּЋ2I %)Uv%&,XYQugIQ%h,Ye)xqӗXjroIt~ysq>2%ˤӷ= %a<|48J;VIZݎ$m%gRdǁ F MҒ(.Uр)X93QXB l.1A[LFdj[jmajdk ue[[x|ZLH󫂌;|SUnfq>uO&דˇĬݒOR[0IG\J*j)Z(ԗӂsi{ a(d'dBb6Ѩ`T%AB9TG,ZlugӴA1bIǾVTf&*zFTE Q#Bi"&1 XX iF&E^o2!CF%[E#P@4F|HY'T=֝%+bbǾVvE&N8k*12JIsT)y͉*% i,6΋4r"T1j.eJbgB TdqFbBY-WNUP;-bU>uV=b" vq[#_!YdtIN$&!vdRho9b[ و..&Oa-pi_l`vF_{oȝoYdE'Y.{?.3I/<..4 O?,u4 jBФ(xH@{H2=ZiΘO:cr%WDZBH1%.x1δupiz&1 #r!X)Ejgi :#JZW#ִGe#dZ\h:oWw&ڔ /\5JȍY3ID8Id`gCob]?AJ&gTEdr༓@ȜgI$M!Cmjy3nqP>)c`,TrĜ-}#҆涄0. i}T2BTW4tR [H\JFJj<(EdY'Jv(xV+nZM@問j rKzع/w3l s1g8X¬?'^r<@C(F7 FiđY6+SOCڝ{Տ@#h9D/ 5%E;$B i2[uCyd*$p̕fZ%KMQEB-A  ՝GgT Rln+7E{Y(:sX2*/'^T2]KI3Z6Mu/:z%t$~OiuX I#DZ)l]&9źaDO{ C$[ۓo"6w d#!5NUT&xKa 9220+&J "tx8qM6`=A*^i pĆJ% .Al2M"$H̢$#֏O+˺OLsaEVg>-YJIٓzg4fǝ[ V +uK3FOg-q]MN#i4fGj]qT$}'L#O#tNƛхoH{G;m߯~x~vq!GEM嫹#Fڴ(۾%~Ei}Fw~yݣek~m~Y\t8%8?<.V۱Mϧ?_]$~{B%-):[Rκh); },XVf5г6guUzɮV;ޕ3eʦ:t4I|v~ˣf8:ϣb6/v5ujH&~[c砵Vlղmk[?X\ڇ ڽlzHOWrA Oq:}e5&EGO v>{U6<=vvol__LIr*kp (6;[R2 n9GH?WYW2?Jgگpt.|_~W?]n\r_㑻B88ᇜ #dHʑ߳zfx!%r`PܳjժUCzxXY{\O_p~ 󢽔B͍<~B \*\܂s*ߕ`-~#LJ!dpUx.\z[M_1xoI/#8޻+nذ_ZX`o6RpP.];ώ{X/-:hok?ݙj:Ûq$hz=} d{X~qY2s^JnSKkbL+/h}won8I>)|0)9elrěvPb蝋c( [Gy}߆9ƎeKT,3B򡑧Nk5b_MKaFl1yRq^Gsj'j>+nĜz&t%M1U"Lc§t<.t'ŚBbt;oۙ1^b诤=ΗcsK։o"_$z4,>$fF*$(v5Cbf*DL_RUZ+Q^^[[^LjuB)7CQJ95f]Xd fԚn 9Plf!ε#4}'1dNS|sН Ζ_eߔG _}\};e02UӏˡVRrw[IYsM}֛(ON/VĬMN潭A m0/OBzm8ە%^\\_}1*gsuC\ĕˍnOwoyŝo\y=.|XcOvgo*H^d?=ήLrn{^6KK4z3s/G2Q穐z]bT3Kf\]epMp5 B5S_'-̖5S'ΔdK M1f8;ZBɩ-3}SL5 b<{]#%Rdzm-4eĩAS6˓'G+h l]`ن.2=pݦCh/Lن?>c}?KOI(?Ƨl! 
eU}16ZGT5/_V\R&W3ø7 hHRˬ9.U[x*+*ϱO(zTj}";6E~"oõWٳ=t$Kf)2xzr,ZNngo\mjk=C5cwhka3[ n]ZZ%z[ uopk:v \5;CW-kR㞮Ax맫8+jWftoCtVw\gwZ^;] a[+,QCtdgj +t^}28PշHW1Dڥd(; p5zbfM'|az[РW~W5Sk6M_1h'3]%LI:AK>瘎VGF5WN_C`̻ڧC8:'?7ڌ]]uo^J:Mħ->9BXN*,s˥X Olvu,0˕ˆ0Oh3ih@E~"Fy܎O ?|Bx>^w O)Xg44BNiqv輪ɱ%+2h\r&78.$rKW6a:{Ӎ}~xq6?Α/kp W=Q1&Rw5J ={orLʶNJ^:{cfmNY(465fOBJ٤jxTSՖʶ3.$ݛRy`(LM6e R=O3 ٕZŷn* Œ6RX`&3Z0$;Vb c9QN1<֐ɻw!lwRe"ڮ I9CIifL> b{m [zBu g2vk117f(:!)RL qz7^{,`̣g}}Չ!S0hChC(C&I eXc; c ޅKBëS=^bIØ&># >"ff݀F4(O$wm8=J >B(c8 a,sRrX\cO/Ss.nϛhxU!oZlM%U {+2.I0̒nB,FH ѥf ӄI2](_ "O *.:cs[ Qb:EG |3ZmM+Va?(QI[U<تJ|X`,)L cM0mV7]S`+:+ ŅxCcwXŎUIii [`zym!s68|BE (#z*$*F $dKcs'-]+n0W K%c$F viB'+`h|'Xx^E-V(!ȮJ b3J* elGP'P C  Y 1!6KH `4I0TTD3 Vq+9oL;9F*AC-&!VɣI&.ʈq`:b<;JAw/u4V v* ebM.eUcܳ.RS+եqgD0r4Sm ,yLJD b;'Y(E@H 2vomy V&КeM230Z@A{5vicǬKDpbh1&PTؼ)C"/jLGAd"pTƸ6cjr/ L} h"X 4J66j b9PT8xiPLd)p!Jr RGV!˰c*Rbɗ )t]8'HZ R| LĴ*ː>x.N5~OC 1}&H@9@r[ q;"p /YUBN5~D}klU&փ` d-EJ)0۩i6:j=by1k$!%:]%@_>бg1V `*2{ӈŘCZ[Z`=ցz pp>k.WiO纑$e* c;v=.-z֡$}ۿ{'S"b1jmTx֚B$KBr!'ٮ  Pfbz54iV{R{8H!/[t+9]nDxh>;W$ўguJDݹ RA TbK̈ ҳ V0Ud}F `cWP!>GXW,#R OnDNAg +@ #xBaH r,"H,ҽct\:'X& XCژlKc4-pF^6f 2HE5kԃ*LJm<3iXX26a 9Ok:&yB݆hǖ?j޴ +wf*6(3< @P8j HAf@ 2\Ȭ?{F] aKidd f3zZ)Rˇm-{Eɤ$J-=lu7OU{b1%7 !KS@ tQC`Pb.zR [%T2 w9m:ِCL*VN:] F,Ьh7Kd$8D)zP)$klt^Q Neg,0B-?®zBw:Z' 9@i^dAS+|g^-uZkH"40F(xdX jjޟ8V+iXw1GYOXCꔊvߖ3=ic cw/{Ί>Vd\.G<?<`qʃWHp_j}(WRRp5ӆW$" HpE+\W$" HpE+\W$" HpE+\W$" HpE+\W$" HpE+\W$" HpE+\IWFp$Gpeēqס J+@ ^ $" HpE+\W$" HpE+\W$" HpE+\W$" HpE+\W$" HpE+\W$" HpE+\WO*A> LGp%S7+//Aݒ 7nw" HpE+\W$" HpE+\W$" HpE+\W$" HpE+\W$" HpE+\W$" HpE+\W$z \\ezsN u+@i-#k\A)HpE+\W$" HpE+\W$" HpE+\W$" HpE+\W$" HpE+\W$" HpE+\W$" \}`0ܛ; V|0XVP65 +7ީ<[ Zlmavч)a0qIנݧ7tp ]!Zm%ЕgC諵wj`j?ʿ,]ؗPr&teNnvFj1 NLB`'z/k9dO`:/К#gtá@Ī̑yT]}y'F^4iQJN4 iGtuJM_ Ѻ+D++i}7tpc}+i QJKt JYs+ "Z<(+mGt]A{ ~t(7lЕ[#=CB7 U QjJ_#]YճN?}+D_<ƑDW {Uƣ *st( k+R>]`%Xo *B2 JO~?t9řqo Op1#h6-A_1*<㛫 ?.wIi<͇iv|y-ARi4n$f>=7|["=2N=W'&rYKҰR5)OGtbb7||d|}tbsmåcvX׻s.,û?gyFIojs"XXb̺ge>hrhLO=LdOk׸rm5RبMC jNB)ZP>gzQi d7rd)KM̧GuXLG}@e]eUmTb:z_p[9%-W96b*FIxb[xo]I&ό ./th5;tB9+3Gtֽ+kU_ DW4F'8jzCWW5 Q:Gt pŝ]`CWW&D:]!JOC^#]Yn%St ]\y_ >]!JOk+'x-KWXDWW˾u P*Ft E f{} *yt( -dЕg#&q/^/LW5/~h Mұ=AW]8MhQ!U!s\0cNM;#_iŽ}=yC8+i.K dhg5X^:y%TЊ@N>tz_ߴgy0eW% F oShmoF~OZ/#o/lqCa4gɻi u >~I%/F|lkB߻ںO;^|uͽ[iJWԯ|Oq+s8N-mwlه6VݳͦK[X4f]8J˗I:o#@I(FY]IR51eV*x^dY>]+תghqWi/Wn#p յmfOqix8'ü<^F[ibA%hk|=-CWUGOC#y1)˾0<|ov?oza;-nK8iյҏa$moWxxvevU-ܸ|rd8+nZ^)Oׅ ސ=qZ7>Ux||ZLg/G׈} 4o˟G쀉kʫtݤK SZs[k18`jOa4HkAx6A78ZB,岈O :g'\#o<b#g3,h>mLb3=|̟l^[Y*|j^ Pö޷X?*2Werbf<[ 5!/C0j ]1s]L):yMr '*v[BXA jzx>pmﮮ wń{X <ÕK˜7x]S% g\9yUNYn}Z1X-%bt/%Ge)(TᢲMj ^Q ùnphn-Fq#cE~go~ӫpvyzzc?(JQEݷ;;Dwq#Y%a9]¶Y;-^BigRT)D0%b0H+-!ShʮSf4֜k gsQ6WE%ZU%nd@f@R]3cgpfbθ3x(ꎹP>)a CZl<^b_>Єhu4猭c TBֳ\J*r:ʇ[>\|l/kv1vs6_xƢ׫|Gz_VqǺ;VC.Q V?G:_xFez|f)z ģMQJ4$0k62 oѳ<@{I ?%ꎖv 8A6c9?ea\Uk%ebdV&buR$xb( TN|R1࢔u"0l=C"q{R\H\xCH!Z~yN/m/o~ӻ. 
$GO;Zaκհs+ #d\VT eXD:h!q]+a(o:\hh1uu7mxh:ӫ$)Z-kR xtuPywwXW;= 돭A~*㲔3t2^s Q` Rwk2llT"j9Jx}2$k>l1.]]S>x]5p/3t-0S9&!}⪚j #uHZcsJ@9:*bX!ETm50Bv+qj&yyH_OU_,(މQJCO͎wN%_̶'\9ל igeW D#i4Db8ʫ8X\ƹngs>3ܥDqN0LO2٬lxvTFYݻJծHJ %&f]׹&g-|6Lv -|\_&!:sD'nUy- Ώ7/5hnSx>{=,X^-ϡj6-X8q+J]QX7:,ǩ5yy?,槫 o7x%U;oq ?.Vݘ'.<-ߺqGe#D%;ޤ&lkh)wfX~e/^2ptOZc;RU?ZF_#Hkבw>sPJ!)kobȲq]@H)` j "mՑ0yU=a&<ڄzIJ-.pXҚ9Yfw:zp,L:0:9{xByʠXպmif?n;lӃxERD U@o+0mhxNz -_ȴL޹dD&bDB.ucBa+&JG\\ɒܾx99ͦRZ&uk&hNZƴ6s"FF l9_ Kq2cVY8H/~)lBIBUG@,`a[0q'!fnd@KE.%#&X<IB-k,UV9631-tbfi s1 {+5y;[R6n.VJsxE%W[[QU nt$E[U-CՃLyѣsY%TVq#u =`4w@rD'D5k6kW{IПGc.1 d@o18>:!HdFȹԬ #9kYkG #Ӧ]Oa7iӧL0ě͕f|*plсБ) U.[{2U\`Tjk/T*V iCi8/ih:IxnHhݑl&mV$Uzy澾q}>&=W<\%El0~qw`W$gGZVM#Vz|9i3=ZIatiCE@n-JLБŤGZS64E=_{F98:r;9z[KwDFasD*j鉩rdM'o9x-z|py:+fY3V jQ=t⭶`7-Kz0k t_e.bJ}w*Mct6 tZKNR`NZ/F' Ȩ18ɰ}^v>UjfzR@`mU04IC մó 7:f.XYQ"#F`ⸯֺ߫#~DWG Eə"ZM$n]P&P,"} Z$PȲCEfAGQ{p<"gS)^lYJU>M›"~54_uƌ<r6GIlAWٛD졳eM'|$^JaCh ES*RFzgM\i‡gl"}'[PWYtL+cC6X0΁>bi3+ޓ2gTձ{%+C. =-)-K5-e:Ҭu27Үϗ+zHR!^ԹLΘڕ?: ʋNg4ɵﮞ~6{{ކ*uU /cV>ڑ|Cg'L1t(m!a ,>pHE|.OD2h0.JJ*NWcsg<|&8jFtaUjQVEY۷] V~}ضH@HK2D#dUJ>M 1"A*ij_D@EK08oȵD,Fi7N -OxѢ J8srW^fߤR\H|[RdKsΐ5ExY`00E.=s(IjIB?pKQUr+=u{Om4DӶ ͻࣅ$ >$<ԇ\eN%a֊cO}HTZԇ_aChTL~T@4e!-=" Nb,GuJсi6aXS`hP'f"FԪ2C m +6 |JZea 0qEfd.sީMMWka ao<\{lpuyy6\T0f;t8?7<U!^ϢO{»w>i6NDn%mԏS1] |۶w]z}oG/ sQI7Wm_ZWۻtfqf!e5ݭwmzh|SeeoܾJv͞nw_y=Oh{7|)Lfe[[s6Y]w;{hqAkN|䒋L,5֔x S`\w0.,2KVpSMFyNWZYg >eprT BGg5hQ|$KD"=C8 LJW\ѮJcL4ӍC:jD;sނG4ߗ'=q@-9qFJ7賴(gk?>=4RV eVҜ C%AmuV.3/b$oD.A!=(ݹۻѬ]t\BѲPڝN/u]AixJR N&dCc4>3xD[$.r7]h}z!@sYv87#󲜇~2pR_׽:\a:sS~tbEoWAYr"4ZŐHxw;]fd~w^$,W|-P.i%C8,q o;za[I~U*-T%,VJ" ſj-IY}s!LZM+Wꌮk)'TGєM7͏`6i`N'쯯ӷ\,`ԄϒM&jǐamXLd ! zuyy3Y O2y-g;[ة$Mww&1?4ޅaΛ5k[mIԭm{+,^N9In^ ֟KkVdKS;:71sK%xS85mЏ.N_Gd.)je1$9muWʍ5OD?{GEX-;ZYUd 8>yK){-,${o {F#HM^ؖ5d]Yt#g|#e%kWMU|gFtwBh#8knOgm3deӞ>mC/οAc:3Js?x>oSj08 ƾ뻞  xj\Zrw8ԵcaQo2[E^*#Ma9 Gם_]M3%Q5MC @imcIwػ%b[G@ VEzf{>whmoڳjr'T|ܸ0{W|Q(6ǓvO#`r(0a,&ǎ2ch'3ͱ Xs0"QM5|s~mڮټ778h.qnZN~ C;WyX9!s0d;EA16N8qtn An{SZ?fFczHKԤhU0tZoA[r xpS+X]rR/\do 4oY|oo򷫚n}>DwAL^/Խ&"N[h.MD#3bW#O}QVvCز@yYYك3<^bX"pUM#`o5Fp1A S.iaG4UI=9(ݵӃc杨êuR-8$}kJ!8 CP[iR"ԘY0)ıD:?|Qn[.{7ֳ]ui[ju+!炦 8q1SÊ25l>>5lHujxSy ҕd.;w]@)RZ򐻮rj5\Ao)FW;I{וQT]PWٟGՕ/FWMѕ]WFI5ZdD<])pt]RterוR&z5^UԱO/qj&𢺚G+=fRb^ApUWUF֓ttik ɗ3_9;y2-no<8:p?;/'o =j^hCZ'.Fxc`lo>sŶnOn˾}wڝ?1=z᫥?^{O) Pa޼ UOvI"l7O*Ox-=ptruP>n\,On>]vH=J-n^TqOe2M`#7#'^yB[o7 ^n߲\f*}$v3o0~#pӥcCOd4ZT>w}uh7468h,7}yA'Ug yr{澖i0Qp`dR+zo;m-L)4R1# c)# F4LJNuda# KLOW].|6uej teJqI)2玵+DZ0R’+PW\1FוQPuB]QL1+pKѕ҆W(%zWuB]q @T8"+WP;'tSZbJJIػbt1B)RZu ezt{וXؕTZ|(j ])0b9 7Rt_`!V]]U.,diG׶x`ކCBhK?d&,l./|LZPT92u% MCU#t]).T6`2#j= N$ƂtpJi}uueV+l>̩]nRtI2J/UW+5v ]n(&2ZJ(c֨+ANJj7Jѕruա5*z[trוᒔ+2V]PWGH%])'_ 7Tಟ4J_5J8*ifP\12\.&RZ %ԡף+gShn)]~3yjyPt52tRgW]\[[͏'`gQ>4TJYIMy%=̼&(@~ϔDĔj9ZyU<ٵY;*Y&3O5#PЂ 3As)C J뗚gE-qhAe ҕ{(GWyrוQ&Z&t~GQW])m]WF[ƃ+FWK3mueV+"EJSbteKGHT]1+&Jpc,EWJKG42J u(³2ܘJѕF}teFWkUbʀ?vnbƮ4J ֨+H+Hy,]/&2ڥNOGUسé/.1]~飴.=38vyaaBU"Ni*m@$0?urCb*H ̋r97i@k(s[_5"]Igcvp.5f7C uQt/OX2\_$bZP_8/FW])->OוRR$Q‚tʉ ]-ue5Z JUyv}OוQU]PWz,3B12\Oh){]%K u%D~Qu2\.3hKK8v_pϪSG>$_w5 hݳh=CE)Ygg 8dE3G)%;b>c/ G$hU.p!-J0]cY{3 `CMP`Yz;?ˬYL4tW&%9|9C IC |C F-phsr ҕcA#^JѕFIʳx/UW+UbIR`&,FW;V"宫V+q:q8.xnRteR֨+J^ꊬѕb+ .ΠQNFҫ֣+ ]p*'R\]m~(QW1KT`rƮ 7+%sוQb QWI H,HW ,D*WLgPi #XDvѕJ1 6+ΦzhϪS$97C𙏧$KЖ x3ygS p3heY2bC޻ >+NP wr-:(Vlξ ]0btb)2ue!V]PW_Iѕ{WNteK;vC.QEtE ])0 +e(&2Z>2!TP'pc1RZq!w]en[^DWуte\Ngp#+v*u<@A2seJѕцue\uF]BA2`*gp9+vϣTujt{V=:/1-?38 8ҫƅJΣ]j,JV ]qU/n#gO1p LCdd^ +%̼rQ:A yZ0@5'B).fMogV; j;-'άJn4yY55)Kwf<}4 8a9C ty)>-phJ y~,].q)2ڔHRUW+UB\C9ѕb17F(Su]@1R\!h=+\QIѕCAb 7J)RZ*y>U]PWL1DW 2\,&2>2DUW+UdR+bڍ}2T;kUb/ U+Åbte!vDZ-]])w]-d+\ujtxX/Lt0gᆅΣ:(s'g*V]\h. 
g-׃;P/7D28B|0i-GFKjz/KWQ\,HW%r@MWQ毫r|Tu<˪׋ܣ ox+R{k~<ŅXMtGvlh}W͇'Qζ__.wڨZ}}m_e-kv;ؿߏ_j*^wc/Vh..kܴBtW;[-usZ # z$uwۢݿ$xt?~Vst>=~ONKH)opb oUWzq_owݿxjOrs/Ҝ4iSo;&~۪5ˌBE˧0φB86—u>);*OwZЩ#/5z}mcFg>pͰ~z]m~SQl?߼ma{}Юzxs~|0ۿh1_o/;\l؏ t ilrct4|S`l%hlxГ$b?>?m6-B륍.Nm<Ŧm{A_t~.6ZH}XӀc1A8:Ǩ%^o7k!@8A AZ'cahA[O. dzV^K!=4cHhZiҦ pnC޿{RC8u}ǑE FR4DQDImZA{{z&G5k9RǢ&ǾՈ;ܐdڦqoO@xE6fX?Pd6 vS!jYAc юyr}{m7vԪR8)#SjA+Z* M?)Zfi{њGJ$ k+T' >sC]KQ蘰N F/Q[t*pjCDQ^\ .t]H$wgZ{8_!e̴T`vgMzФWg{n$;T,э"橪[[] ɞ5)0sS`w"R V@ڳRȮB@&Qw_KͲ#oG0L3R/3ޅ*}`5.0![ݫ N6h N@kah:^vN#XeaDM2U X.J]/ E˳ǚژ[]JqeR\ZЬ2vި^џLs [(k)`HJYlXkY#0*T*JvdZ2ɷV&J) [ljU!RcfQ1LhHpu/8FCYBL dJS_%Ce:|=Ʋd2*zbJe5͐jPoB+"X2nP(SPֽP CyHBB("2m ؍ttg-JC(]eԭ9+ƒ s<:M;"q b004KPI!0ά :P%@ `-. :2]:B,  LEwf+%RI9n/XT5oڳFxwD"=dH_($6d^i;VW.#{/^Z#.RVKxܞ4f^n!GAU3_Mr^KYJDA !&dY\D d`niXEvF jE},B\FФBufprN#-]/fĥ"ҊY7ICQŘ@QE!i$]pB6}ofC|]gⅡ/Vv>в^N-xY TA׮z|ou/c&>Hh"X 3 o b9PT8xi?olJ?l:@GVZLAG=$]I4T*#d`&SQd~N4:#.3(Z R|$ LjyU,C Y0~hx L}辬$kIu u<o 7tXTuJrA#+PZDbYьmC5YK1Z1ة`U0-Ӎ?_yy'K =`Ye>ZbM=A!%DB>hAjyq:EjC$*KtPK|̡cLGuu $$5(ڽ,C)y,a3m>%TB;뒄 Xut kR([|Pm!ڃjF,F,Z{65^ PТ td!.h]`mhAIb&#EP] C VA;8*jVTeI|$,R(jS,ԚϽ7hV A{H$Q5$YI$e(m@Vӥ[oU4^"{BZFwrAVmy6,a* 1dkR@~㠝ns:["͇6lh;uT$KT P]!lJ''f= ]Z`- DiVۆnM)DzI(ΓDCkBm1&z~4l}˃ʌؓUZAIE!9)lz@'3Uw[[^tR"˕,.TP=`ePˌ`*1#K[Ƞ rwUaÖ Vc7 Xh!@dviP'7 B`~E bX0j`R;JQYTPcD&7cQGQ1abY;p!-йS @+IUE :ƀ6mӺ`fa+5iQc4A&EJ3i^& &ci@Zv!; tsr:-z*DEKnj-z5zY[Tڠ@~ǃNZS0h¦Q3 _B\ M F:`G򠵧BSp*qKچkCWQ[1xx@A4D;h̦r͸\"AC, c4&0_BE!:.XIWKl$dip/(m*?!t+<!G vmzїW{qrlHo!C O yi3WϞ]A$Ҁ,=jwjj9om^4ll6c^ܡa_0 ~ҧBX(?V?~˩u8mVCg_[Tp: }\Yz] Z<S=b‹pmI8ҏeԫX] !KU,,R}w-ΊbǢ$k>U?]Y}KCkhfOg뗳/,ʏngsX>ScXЗ_//#Hn֟?\"ZiBE"x?Dc`Ieן??z/$o^ =%W;l|ZChMx;9^y;Zz),F BW@ CB+4S@c ]XЪGc{BWVѓ&DW쭟 ]n0َEZ=Sjk'HWN(3O"!>vr*G"]y-R; ]0~<] {;wwCGWwBi#3@W꣇>*ݣt>An>S)c^[ļl1H79xG#<:䝓73i]^^^xn׫NtuYM288nY7jAث&c6xWnn5_y27b닋ϗoݜ; LD{*?^wm1ouF-on_m^i~hGj>*?.-/[omsBގ/ԮOkދeٰ<=9q"&lElCΗ-rUф!"D!ںysqew;NP\ z/SkR)Q8sOmMY:|5jnCAAn؋`ɦ3}i$q?/o;Ǐ^,gW/l?o_67Ow=^ ߤuM H)sik卞|{gצܝb@_<ۗ~O'{o/zۜNLA9ԭ|bQ?{Hn!8 &ŗaov] d0sX</% a5E,*afvt+ehK4xm\л)a4`LI-T UA02X9פ5I Y4@٫'!cP4q?,6굮VYӣ dmIVZS5E3F+TȘ_CCLPve2KAq+b&!'h[R&>q*+O"5H5V/n>C1\9`ɕdV%& 6L&2"\.dIyq* HYr d!3sϢpؕɅ Ç.9M:Z ]Ɠ (i?ŷ[M  y:t^_:M^,u)_A3/7^|LЧ~`Lb2*&ql>jpt{wg -ggٳFѫDU9XѦ]bVy&m#;n0@}(3(qȹz \LqW3t"R_7fիzxQEFr% Io|*EM5{H1?ꮍo7O\Qè:=>JwKq6Q<}Cۛw0]^tl]q)x;~7J4;{-]ok6#Y,`Ų@mv+uU`[wr[[*q 74בi? Fu؆q r,wJ/JRREsʺN/i:߿߿|?ÇoҎGI )mA|Ѵnijo4-O4: z6.oiH2j2 -V !w~m?~}|np=Q;,weɝ8 Mj~8% *jt_*-rEqmޗbَ:J]]*C ,SGuY/vu4(_##qg] e/DBSV$"h}HK(LB$#aU~J0;s)dǿ-s g\3/'δԪu;3g:Ƥ0:z_"^1ڄ& ͥdIw1E^'Ҟ4r3N3oi>؜X3 |poc͙=mc{T3V _B]A9@?EcfwpOzS ЂL(bp`!Y_WKguD.IwrwhԽ)̫ҜA1/,Mb#ƥIoZwn>ϒًՏj[ЗgؒwDR?=OjRg?V4>SGf*_~ !"oƻĕL"S,RJh+BJ_Ԥx4:!C"}Z+THљ*~{o ߂!!{8'U:Y4dh٨LҎaE"c,-n6oXM[1X\UsBԁ+2V’X뽊`P$**'S!4 ÃΙȂ"8Rs:+ cJy9BrG 2Q +HcF58X.3&gU+k$gI4 `r"dP‘Rp>[goY_IvxfN޾0 l![8E/wYYxfyӣ kܞ2{Ke{MdUq,le4* r )lh0)kS'E|آc,ڸs۝\vn?S?7R}x^܏ o✾qy>i(]Y<)!m=|ͥgMsr| cYon_TI4rAߡ+V*HQ;>8'{٣#~BGG۷3 :/&ݬt9[/ub8. 5`AΎٻ}er Et_91%cFA%!R޴-)qy z }7t=. R :N*6+=XO Ʒ1r:b4h3Ef& qߕA6 d*$6>p](aLi!BJhXBK%KV;PDpqfm!K. z )9!]Q+YJ_ٌ\e Q`{^B54@lL^BWk+87X8ykٳW ^ +hZOCB@LSTL"40:ø42%Z+oXxJ,^ ІP# Z*1s&8Dž%&DR`5za2 _*rQxSr))0 )E\#&hu"YJ{h:5Oc$#1OmN+*]wWOtQf7[uѬ+fjw W>s<敒CN>2|#q="S,Ù`{q`15ݟ=~je^OZ~%XV[͏͹R|ҦpRTd\KU&U#*tWyXAԄ _;SZ*X6D VP@ D`gQJ1Q TG#gWSbitkvvw`1囋8 gL7 _ VFA)K0e*LNT^g؂6]qU:63A2Ӑ 漈F#瞠 d n67ט 1NEK>FHR\fk%dZ 1q ZG/ߣi5*}R{1i_c|nP~o: IheCP"eG6'!s,Ѳ)1߰׫3jzSې7O"{bZf Ů9o-9 (:$0Zf萸EH+G)RJF %Mchs^9W^l"?&Bb+*4ZWVH* ZSsYL|4oCD3}R%K\! 
r49Q/ޔNӘ\?z]0O!v ubm[knsçB]oGWk{2&clcq N1MrxqIQ(hJf,uիޫzV)(bs&DA- (I-eb1 1(97"bR\#Xj4h pVEFMaQ ݺ-C:S~&.7I];#;#J f'TsA c$i0FJ1DhI$V *- C{}^FO_̸ɌRe'| vHNyнφqi0ٍGC&h~ nV l?.R>_-g6rAz0뽟1%5gf\sݻ{WrVU%NHE/8e0yrX”}^ Ir#[J>(|^|vGr,s),_bJBDJ@ &_0\+ ~Ƨ{#+Y•fxu8Nf[ίT=QމKq4,o)jH ${ sxuȤ>i >@Az.I~ƣ,=z MWj;KiۍF`T xK!raq:^(Kv[u.k4 W&a͋踦[;;j=j|;1Y91#hUoJ_̷g7A~7yJSk&fzh4@\Q֩o8\ҥ2'+o7Ԁ]u۽'kμv/]xJi7lCr-i2mp0o6x'aZIw/u,uGl-;. Djή9etӻ'nK ?B$DvyS#Ju X JBVejY\6̋H}^D5mpsLal( cBQ"acF8p ֤8CG'A֑^-$ŋ|ftv6ٸ6\u#fӸ UMK>\O=Y;WrR>.s2()RiXRRh0LLX2:bĢp+Ժ*?J199tDe|}v}ە!(:X hN{Cy7W2:E ,_Y̙C:<N,h}f΀ݣQ!&uҵK 6~ҡ3֡]I%*8ʝ71qV{AW1pʨѯ.~Im(˅oe Z_-#Eٗ0fp]6CreF.v_e߼~ x$췅A>Y/49dJ]?Orf:_;_R\X=Sf[- ߩzuE OƗ)LskPsV s/ʙ*OF#AH cjw ' ܸ Ҡ ҈ !3e)-TR0HR]ro%y.&EwPK$)I*8ڔdڐlu<ꯣ*|N\29+mƭO*S ^s|[vfkewfi򃲌FQ5VXĴ"F'$x(ᇓVU(GkQZJy'+}Ԗx"eqf"TX>]6.M2vB^3 fZd6OMd HgӯS7L7~yAnїA} Ȭ&y)O5RFhT[*4C2'ZK(IItY$ȑ0`#G䤪ks?cЯMaڤcW5X`x$<ñ$不0ب+҈OF4MEŅdFȈ0Pdfg 4CЍpj D9#p0G:HcjχQpKFM>veDY3#ʎ;FK5RtN8X^bc0^2yP$⊣ 5`Hm8: 3[C19t`I&Vwo]pg"yEZ&u&%;"C5x;C 8FֈaH)F* FJ"xؘ_uIǮ|Ê_P=yj`+RQN8uޏȹPdX%?Oy^CE9\U(qTENgGh>%|MXRzpF0?N^I.ؾ_>80E,~(ՕL EL [2pu1 u?ٟgI/)=eh/U%*$,iiҘ}'K3$T>k\ &Jn ]%DtPҕi" ++Hk*tPJU0Et5׭+)$:R]%T7J::BR&L.-tȦ3UUk bLBW -m1Pl_ttr"h'I,~-t%Z8t%JEWmNW]ҕ_XD]D9/<~يFQ~aZ9f =DB_'d4+/Кcgç/JwnSZwgAljގ~_z8xMCq> Ka]2"a7@|r)d/t7t..@ ~->ōQcyzGB,%kdӏ-FH)Tt"mw/DJGc EsvS#PMУЅsc̱|6~cne%ߵC}Ɵ֤-r9LrJ͙"; 9)" 1[&"(*20Gpa F 5}@${Oסy4/*Eߺ$ݺ?O${ԓ%A<[>ffm` $uE0, ~4Q DV ]gP On k{_E´r_Oaf>eiMӫ'L`/H?&Oj4M-_z .;zSuzvR /@3Q׾.jv w^@cV9Y/MԿV//(ɭ[ //ʾg|X̲8,B-&A?{۸俊vݻ`s;H&X 觭L*䉳wjRʖdYlyCJUտ?ktd$?KhP kߴ{ܥ䦕ΗB*d>ܞnݲ3L Xa,R;ĬW逰 ߜ ]|0v^'a> 럆A|YTzt꛹1'<%] k:V5 h~E_tq> hLӗ| /- M.}m[n҃wDj X suwW۹`[xԨLFY*?Fi9[ ߷_sJHݺ@ՏDeqw/R  7~Ġpyp0_`:uQ|Ԭ-'-gْ-D{F'* ;`]Ojx~M%RaﱄWCNJ=e/ ?GS?Q(HO]X}zգGQnA=~7ڋm^ iiJ[סt|*Mb1AGQub-eu".ؑLrTK܇6Awea',U)8h=VW<]9bkrkEW@eXo:^!XjJ&hiK˩#mM 17udLG9DŽRn%rneZGEdQ0A[R.k%%zVXlBoT-pQj*WXyvaUĽڌWYAmy/_~Y59Ba"JGVkdgJC+\[HqEc/yX7H!!@Lhe8w17o?^#K˧pTkၽVqxGoft e&#QHYu2LwZ D佖豉hj4BZ":Н8nIӢhh䦀zyS*М@hGvbrw1aVC0O1J {UiΪ*bOiusyZkB3S84>\*JfBĐ1YfN8SA Is~)3糸܏-!ڢ 4*V"m*@>ʢlrcH& dP @!%66&kևhAZMNn"5+l\L}>)þU.FYqg-'-\(s$' x6]iۖ-;.OweiyW'l0H)$zs\T +=.a3ꮚ{4ÛI,[uHfr'kU0Me\ݤՏ 7_W5A ZOn0ye͚ ̠e6W'>ރrny(b4z4=dBsn֍9"Mj^Pcna3YhyɈ4G_ "8)(؎W'<XWrJ}Ϸ<ϚC|ώ 0r13%˄An3O|۪'U;ڪ}9G>(1"8t`?;cK,1!8wPRitby\7nmG}`8e^ Wi*d)SI\['"EEIa i``{LOP(}b? 
=ԓ@Qמ>Lv0I󂨟7 `8JcM*%XoBGO"9X7O7㇔Jl,_Xw =I}afӢrp]I0ߙL>`zk3*'QMX3a zC>]X&EC9̋<%i@hRi^N\N W7S7K,]Qjݎ&kbtF0\0s,qn%t^I鲂, -ioJx;|X=HjMud5ŵNdu#_,bnq֗(RW"f?^n˔>IٚTE1/1Pd"[{sEѴ:2O'pvvL޺ӅLmۤ= E-ׅ{\P$m~;ֆ5=/Xxtzi9.}4hN(Ms|³:fot: #DB.`Z1%8"<5T J9t!>*Ip:,"euA:`x_sVǃz˜r`ư1p U$[.1R\<8 :ubxUa:kk7B@U3OMLq@~*Fiʐ]9"e`ThS5rL| 9ZG}N-P!ǂAV ilBj[#adlOWi [uPci-+!.f3T_N-՗;p< _Oqyf.r8Pu 6p 8֨yDh F#@){'iƞ pg6T^3A%@N L#FN+2bFÈƣbbv[uQ[ڲG^|ubڨjXFW hrs/wfJgs/$Zn?ǕXKbDPK'AF7*ڙj-W:m J\kA%q4/@۳\r!N%-U,K%i7/Rg-G՟z.hCLl* ;c-@~0Q*uڵ9q Ԡf˹w[/ΜFXjQQy>񜧪弽T֚ZJͱ׎^H;2H~Oqw Vn}l</g梕:^uB]:NSɴG-dҫGz \fl[x7:B:7iWHI(0ccsBi,{eI0cN d眈o_zه5z |(촾Q| p|F$9 (#z\STƨF#]J7T$4P*"kByAҨK3"Z1%(TɓSZ#a Sx2RӹP6ElWX·Vt6 |' [$c,$K()!qSjEYr&Gu[[j(լ(6o_qK[*v{Yl(ؚ۪zOs ҵng7v%sgf|v"ShSE &%0ep͍NM {A,w#Ɏs9hT VRSh{LӇ|eɇXS &p.u$ܤ',}&.dioKYh.P{?8ZPPkf4}( q};s7ڟДkZRBU 8jlI(OwKr#3,O6PX/w,^j,~[?<<_/цTr :ǝG݅C.}Ҥ`jK'Nsߍ\xE}{vᯕʇU[b ps=`znH1~U^#d j% #4#{aX0aV# mqO8y.> =sp>rbEn.^iԆwivDž}986>-ƵPsC%rȎA8:~ǷӇ?Sㇷg`=n $(9 oD#?U[Cx1 ĸ@|qAWج2f~V˯oG`ZX$W'zDx ףPWl`Z#*v*5"PAft#AuP "'%߈>Mlev8*fRq'Ȋb=$fb~jQ97{'e©Gj#['1"gߨlF 8n3j.ͨ\ 532IВF)\􈪕U r1[o]Ź1@{?Cb*?[(袭%[o4[oGxI^N 8^[j6_돪o~oUσZ\-d4(~."!졙]62 %Go=r>RSp40\=\p4j%{z.W pzZ{:sE%\B~.nYgm$nz^'?~(&Y}kD9ο#JL A /(ha~o`XV^L(ץqko>2HZW??bcU0'#T:յNp&/= OøxyѼuQ pʓp&Oby1AŐ<('߰r`9 'c'C1=\\o̭X!ӻ7{IP&sJFƆT2bM3J*mceא*A KH!)pLL䮃̩7&!Ar60svqiٖk;ZEP%Z3+a`&̫(-3c `l3ѳ&l % |L .`x_H[Ltܵk!S)dZx!fr5Bj'4SIMW81jϠW\37H-cp^#\!Z%c D \er9n[u2Fp J8!B9;W\3L*SEW$HK?W 8/.d/V~oc' 2ojBMU g!p0,H^qkIP5qv akU.4g{օ|\o6~&2sw|՝6:&)Ũ!ɦKF\)aaQgD"'oɻdo_]ӴNV&;r`\V-{o^ƾnZ:Z i{SR5( 8\p"ޞy"mm>c{"m&ԣK MWYnUA}V7&TQEF .R֜dB!C :i'0WeOVytvQ:&ʫq1 hj=mT8]/=nyke?\<*Z!#<`AɔKZKM-{GG'Aw+0`Q8g ,*B :b =OUJz.~#FژxX#͆8Z~`{kZlfA",o<[xb4Z:,h\ X 󚅐q c#!(쁧LfCFCx 8XxNnx&AHk-d!=‚>0HlX]G*S躏E߭7FFW?- 5D&QN% ' {&K*^)X 6z=~?4YoS sv_Q82KGږ(R /x~tO{\|0MG1qA N@-w&D.Q[I2kz)NW=*N,NgLm>Z0$EOW4b2%8:n7ߟ-&aLO~sUl:iF!7Dhzm'zqáq56%IVD"4^.?_ Rlӛeqp;B ~/w,M:))ӻ FK5 $PϯfW.&/4zFVbuݢn#˪Df͚HҡSzsE|IogKyo_,rMrvEZZ9fu8[ <_ @[}ûf ҝEDv1N+KoiBu8FEb볽vε6׽APҭ+xY;%q#:|y+~~3n]=j\x;Y+G~y(z*xG[f݅)xzJmE1VnB~6yJio6=Zr>y-m4ę"pi٦T/D.u--ӵֻ{f>oy'])t6ile'OFU l[3ڤ&Dz6;5bn_O[n27lxzY35v;ݜCwsCi MMOȈjϘ"b\ɹ<3f,.fkc"bwelϏ̋~y^D*|EA_NjO0'&kY閿Y=o7`{Z09Gmp+ G8]+KR?Xt)bLƜmg&dsw褃Nj)9A M W$nm"LAH IyS NeddLq~_rjks{cv>'Zh oۂ~nby>f*NbzB|U 2q+^16\c_s"kdSl5Ykh^C0,r٧!7U؈A#DD Y #+Dj*ggY**H8*1,\eۮ/dj=Y%z>jRmAvͻ޼`~=oܨ\tpMZb}VހԟңnTCҘV>@/HOdv1yMe&ɤwepI%939V9KF%gA洑]>6vHFjFz\VӌCPWBcӺ΄*xPw=Xe_<..kNj<3bRbV n'1Lґ-%q{%Ĵ(\ޥY0c'dB@4*@l;2P3d#gVFjF05XPvڦ2j{M(#]`٨[bi"#M1䉩ڀfFby z1,dbčxj@XE#Ph8F>dT#x_[~[X~PGi ,WP MQ5'UYX"L[rlhTNU@#)E1j2]6e%rBXjC8#YBU,Sg7"} \^g5-9FHޥ{oȝ!zE볽\2ږ1+jjiۿ6:=fA uxԵ n=Ӳk^voݕn/|ubv_Rf/o޵-q$_<̴TTĮWü혈(uAY~fBE#d[@TWթYYIX[ wME9Ƿ8n_eUɖ')[O1bc[9X~TUz2AzO'V/z~fGTv?umOVΔW:#{^zu[Ѵ6^W|4]857]758&~Ylp>~Mg ˞O |~]=z4ޘ:zxb]6ys>o 1;K|I~/0I?g";Ѳ^52m&ZHGF,]f >!&a?px| ~)q[B_f=}d*=Z"vmԈ۾8DHeTa|aY9|-}|<4_?]U'e_^vK F`+NE*A_%ٻ #m{j!'rI0cW @̨hRRĂ\Ŭssh݋^tP(d2JJ/2YJl ^̜vs#s+鿾>6W߿I8Å#ܯǿ-A|ycWf+bj55U_,I]=_>ddJȑl2GSTءMcB\4< |ZS)CqQBhIU":csgp0ӛm_j7jN_Mgݮ,YFZ+*:`'U{^tӋJ7?Ǣw@]_"fb.8kT8E^PDXB_q|HN9/7[~Κ[bo.F_&s2ZN7_J{Q+h{XqwyvpyK&gx#1N;PTJmfYD uD\G#+,2+&Oƣ8+)g津SFIZex^c_ό3?hic8J($N u60Exbp$,&/pa%0S3S|J\@-pRBsKt)f%G_,ELp| ܁qc|Ncqx L QF)Uss}ug@jeMtNxQaz=9

h"OF7x_SAy559oۺ~7L>M_'[<^`~|3^v\ V qGIo,w6Vw.:ƀ`\{ Zs!QǧeK)t,)"?dtmQdC[#cᬡ}>+!y^| DIc^QD wC7PY* 'mLڰ'Djω~KA2R6ySBBWڊ;3RW{# nSڜ}F C)(.C>Je. K lPAz .=};#Lf6r]KnE寊H}(86k4=\pYp+~?2ѯq2I^}{?\-b{;4W_]o÷]Y_޶ b׉J60D6І(,IpAZRyS"JŐ Ť{v|ŬL6tSVh%sv(E2Wt%:A[ps3Sj~t׆YfY톚+E Q.:L1F7L/?zz4D (#G\F+5nͭ*i6bNWZh*w&F4}4-,(*++tGWt(P]!]I'FZj g>t(@WgHWr"}UCWt-tNWRtut`WDW-]!ZzOWRΐ YEtUը+BkePnlʂ&c+.++]ZNWrc|X#*+*r\U uD@CWjϮWCqu@V=uU;Vh4v(jW-J tup׃k#!qAŸ*t3c5&BӄV vM!MsnF闛圗ټG3?Iai߈ 4$L+|Qs#a'k#TS@IsƬs"7؞U Oj5pͦydGbj6NJTآak}g.KƱ;#*Ji44¿-sn V;W'utl.:y˺dM4*2hEJݓzYmh2TZ>Գ1Cгȃ,AiQEB,M2(pɄ'![ɃYhݩۡT'MlɕP]`g+AO+B]PځΑ4*+,y=WZޫ+Bi@WgHWZuEtE!Τ+BZЪޫ+Bi@WgHWFh5F е5t(97]!]YFW"v <:KrR:S wEpu5+BzT{+ɚ0Ts &NWR ˡ+gWLqw pdv7] 'Ξ-MU;gv݂@Ww=XлpFWVvuvs34t):UCӄV qM!M GtJWB3fQ_+tE( Jj"`NWR3 mAEEtEtE( J4śVc Z}+B9ҕ1R|Wz\TCWd QZ>숞#]Y ++,OB\UWWr@WCW2\EtE]=vX5+B{PځΑ*kr#`8y=vpCWfϮ7Pln SVp ?S*ioa]zpQ?@YPmEnεA]L5f#ַe@Z5mNvFT1{7GCj:Yг5VeijczK:lTG:ZZǵaղuDi\ gZNii*+lTCW%ۡ=UŃv(7 tu>t%ZVDWQ ]\U"P JWDW %+L-tEhm %ΐ4kRWX?C*^ ]Z{cPtutevUc w"J tuteU]!`+X5tEp%BWأt]n[Iv[v]A aa%qcPW[hZTDm'*R4nֶ K$UU^Ns g_"]qc+t<שc'8+tQ>7]=.?g*֮# ]=yc62m3eNkoɇ`tt=Z:*?p<\Xhze:t(]hh!]0q8p;ZVjU@Ac;"`Khj}S CաPҡOJw+6k玆[}t5P/tiEF] GT .+>h9t(IՋz`yvD8f=ٯh1h{eдӺ:p}*(o'q> M{ A9jDo{ׄ2퇯M:S`wf7/eIrRo{4"-;;#6MbQ;[xJ穜?O|y7k !l?_/[Ք}ķ&뾞{Մ|)q϶ڼ;wo"od;noCeGy"Ayw}~BL Ӣo|B|kA2C~@Cos\cg. ڼo|^9`'u>v'9d(PCo[jv9zn:[BҮd /7oN$">*@{ޝ__>oW?hjy1?=5\JRQwW#гn*i\}ˬ2*ҕc`gn ד?)ͪB*U܆J}ʹRVMݫRy`tlƢշ;i=uJ[W"DOoɤY3ZD3bj\ah}k5D *ckds-6hFk۫N=hS|6n-6aQ}l gTS;sX;+j:7@l37OԔ1bN",=XBuɌa8h\\D\L5CёBE{))wpODk3E2>V'G@ۘR:Wv>H%cM":IF1T`&rih*|/Dޡ͎Z;pO+G鉤!]T7YRѦEK{qڃcFgd̉]șbq|}!s9 AYUW-wrZ<RIIbU1HGQ^gc'wSt-jI:5K)%9o|0 46d#rՐR<@<%!Bb ![f8zm\"FUi,v5/[ OA!Օkʧhlpn}E+ܔ,3HT';/ڳZnB BQw_Kͺ#oG0L#mTȗBi") |FnQx5Wn< u<աmӊ#k(QJ[e<*J|Yɠ-]. kBhcnue+y ,&F BfCc:vL\s \/ *Ae\bߘuȆո&?P6PB kGU((JU&ԓ)e ,>'؎~okҹZe R5RcM ̆veBCl\>G!hTPk@oJ'm4dLBXJ@T` :2];F*ACM5JAQ"Ł.0͑&A(xkϒ (Ez@ߑP[m ()eXTW.#{$uj{ 1ҸuF`3/M#HH/eJ[ԠˤDdJ(k 6A Z"b C$uWH&мGwU+czhmk/#hҴAxO~Awv׋qcŬSDr|bLByUtc48!bEN`߻qu&j.cYsOk*ՂY mCka&a-A7 /M*}YǪd 6Rt%C RGU!ˈc*Jr,J茸pP4I"iyM,C-l5tq,y~y`4/! OݗMd :Ynm[ q;2p OU߫jPF5%w@6TB] !Hb"ҧa|/_Ywy4؇\t詌 MKе 2"1wPڧy zȋ9(P"Q)_C݅Zc@ az;H Hј%`f %Pv%)5cEX A5D/#+P(\|PS`iycroَվgb >;iVsCbƹ6$I#e56Ӽ('_OGзޫ̈=Xuk80)QC^":$b樇 tyB1C[uw9vYƘ SA )aɔg':RCQ7b` 񉿼]bDtvMkn#H{] +UA 8Ca( jr#F5X{Y;p>-й ᧀ:ʦT8FՂͨ19fwܬhZ3kY 6jP%h9u"1KCbgj)օ'еuV#w *Mf,q+3jlj kF DOU>uժ (Ad+J Ǹ| =pe2㼐ڜ?Д n FГ5>dN{= zV6PCXm(6tqj۠[1xx@A4T9v{TS.\;mKC1ˡ옕qO3KAH28ĦiBeq$@BrۨB8b!(9wL0H5㯶z9˛y\ondB u0]!7[VQ~['H&͑œ&;O "}v~۱71VRl7͖\^@̮6iXݿ~em֧ϯwc~{|gsZޯ竓7of|濌mZOZ/V?c8o_dU"'2[p# lj$}Nqh @'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N q9Z[7?^t6Vo׷ۛ6]ZYY^bG8&uDxKƸڎ?1.Clp8Z@jw\JBW/"iTGDWhj X*NW5BW Z[zX< 0i~fz\kֹ硫ǡ V GUzfZզϳxcA*cbd[Kem OWWKfnh퉲V3av\o=<d3Dׄ?hK7~r&z}z.קMC>Vf/:&^:g ly Cfzwg\?W޷FS^}Wcry/ОyM?g..YisNq/#kl\6aV^?5R# V·3!2dSqYtX.$%(.5v8k& d5!TYdrV*ѕ{ X 0GYgT(g\JE檠A/:a2@QhRMh칄Z(RhC 0 4V\:UWs"WEJ-\BZ1Μ \ \i=u*RJ++U|]|/ם3H?yvUWWZ;m l=**8*Z8u*R]F2FFpE[ \i;Eʅ^-\ȳbW$lઈĹUS"ejp9U؝F1}.pU=\?\)%p Y>#*]9rUS"i]um9Y+9:\H`iݑEcI+j .I{\vzΘcTOneJ+&.Ԍ4u c ~onnzdc ['1cƻn[cAJ0 yWif%~wDFN~տV׸pH-zLvz25;c,Em軿 dl~_foz%A!k5nՉJ\<`hb Kr'7{ޓMjzKfwNEECk,׫{~jդ?GUf#.ٝ^gSo>whL241=$3~3 7E'T>MUݧ*0L~jڅǫd"T8ʼQ, OmjppNZR#Z+a!8^.jJ"Dxdٍί-P8VB]p eL)#&L xs\EQ\oOi=Y! )H \d>w:R s-<8O W+xo~7|&މQ{#C.7u&qkv+~Wh]+;A5\pa.875ksPE< MFIi=Lץa TR&K(Uvq&5&6hDzRP `>ik=U~Wѓ%盭re%_87^<>려@[s'?}3O*`4S[q0ߟx@3gTu(@x&iUJϕ:H`J(29+oARϦH䬼d$:@idlL瑱96bn U¹Dw_irV;({?ם& #XQ1Lґ/%q-bHӂsix-AfEP=!) lJcQb.L9Y(ʙ\IF #vc~a4ƛz|6xS4yq5݋/Z̾5!'(M[lkw?iʱC~\cgmi=fۣ҇:rxQ)6^ paj %L( Q tV%gedB|#A"C3s/>#ncWp ;1eNui5)ZGg(ݬ܄'!He<=2ȔBG. 2%}ȹ&=&x5:~tƓDh[H. 
0T!VgRUKRULzJ"mJטRʼn%ME\ftqxk5-FG0 .ǽE.jr/_]0&6^PMr+<'Ov #e:!=b yhAIܖ22,cZ I晣Vą@`N0cW¹"OJ%)'!qkTw$F GSXr|$y>&$٨Gu&;sRy w4zD!CG6+E0ZnSkWlis{ǥez;[ERl?OHf`?kp,Ջ?@kjҋ9c}7w;X9%nGZRô#Y4SQ,VgS+/p d)AnafWu2e,#ʺ-71#Ȩq\gJ-#@H{ʾYh1cKt%2iR#%T! K8"q9Y3pn6c' xbNzc 90JRQk"h|qD,hac`ia2OD8TC;sLj1V$"n5&ITHȐ]"rδkS 6]!iwF]#0NAŭZG!8= :('#g> ߟuCU "}$V(<11urɰl*h !|:4}'N MсPmRBl Ӡy7\);=&yIm=/ȗ^- yk~H1+PoX!$HPNF^i: GWXU ,̊ Ӷ/IV;TeqOόM1W[:ŧ{C>oϖ4hno>k9@?s>| O]w`j H{6K~0 ],r!?|P&+|>~EuQƗ\9mgs?leT2yiv\[|VAfX,|5ƲkBf'1cQR#Q*TGT/9*wI䜻!cS FxƅҪղeeY]KGΌt-:@ki>2=8c>PC{U3ɝFY& n8=`TXuBPƹOIB$2A3⩣ً'viLעBpGظ㇎2_~:@d.|H>Zy$)ߏVS|OFئ]"1o\I%SjVCyWp<τpxe4!)KBrJNi$AX,@FjSsNpQ!Mde&PlTG#u0xɕ--XiCa%O yD.pNVƳdt!grlKۡߵ@|7U]'0K:'1-_=_LzK/g9~,$dCS|:(t%@"%nu\2joķ%^Ȃ^iGQmI׿+sYowo*D?Joiuc3ʤǵ$LM&, I/KkĩgC))%m&wII_}w8g=q}닛&n,ּK\c%IgNƵԳCճC׳ճLza A*4Q*T"{üg G![F\)݉i[sWiw$9;ܑJo>[M8Pd ըڤ\nk$GJM.!Z^:l))a - z b"A$Y&˼ PdVx (uH b\DϜR7bGL HAYu l4/Ȍٗ_όuNc!x/T%ٙc9Af9P,ߤyb$ᠡykFBT[.V]~fŮŕo෭~s$CyQVs$~_{ 3^F VV4&xWgirܻvB;xQs)rBFI9X͹Nȸ N8$͔РdL0n[74@gVyOO;v7^3MS䴖0ADur-Y: ^Z)hKJ+cdhǕ !h2d Pp4YkHkĐG"f\n/ϞnLߪunsov`-΋܈@%NDcTF\Z$VF1ES+fg0܄zBON:&~q,U^/5A)`T] OXW |sc~׿N'v6p3Z9%~7{,~Gk_.o)% /U%Ri A$"$nS|;e`ۿ 2n?۩DoU%8uLTRp(ɳ?دyr^A(vځqfzrn>FrE$#,g.O+NәTvG/^,;}doߏMN 9CLvڛ\OZR*\UkAr|[斳tGW!S=~Usg8M]_=$Yz".;̿W{|o6v[$ns.mn4X gܖfR{A-8 FE0nBW~yJ1N4㛿 D72P,z\gg rPKYr+ۼL'E[n;$մC{{f>ۼH `8ޭBn"!)]h [_ދ {:6q1 2㪋n/;4WV]R|;N)g4\Ւ39k]rǘZL#yEUZx^?gߝ|xn1(Qƨ !(tb:D!Oُ@Ђ^#WO8NVƚ< fvXU~e߳ª'U\~=;ҞFO)zZEs.y|f) y|a_a8  \eq>rpR2*Zzp%)P`#UWs\©UX WAf1ٰ+Rvp䦅wW9WgW(0q6pe\ :\e)AfM='IKBlj츦'J+WOiٮ8y<\ڶp*\y)>܄VvM'+Dt^dÇ?,"6{89oDIiIyAHG ,qktX곁,fYZOQJ}[~G0MKtULi/ƶcǸ$p/j5΢_A?n- ߽d\zŴ9zq>&Z(pɃ0u"0lED3J98*N͟p!;0_K/Qhn=>o~g;W&Y (ךY +^zՊuV<;mH(FދF`>:`%9Ɉl\{Xt"E2RH%xbJ'=(er}3f1rU%e)2pgqMkx;{PbfL"%ܕ`, 6F۾UQ`*Ugc EiWUUߣ)ZBO \eqJ8uRp 悟\@#{{UV?{F7@pdf`$&0(ƺȒW-,_[-!wlilw"Y+cW$| Vp*fdU#+WWԯtS\PZFUws+) \ \eq,{vRƆ@W LRm^jԞP*K+~g0K;vr!2胁,.WYZPJAXWwz' X>]'S;V?vhW=%L=`e΄ {jx :eՕ)Nlq/_<4=J^74 hkr }(KV}\W L.LhQry0p>ȃ*K+Urߢ:z= B =vîj*KiHWDNP`i<Bi5#WYJm:zp%~ܵUB;\e)U >GR*w@p \LC+_jG)ճ+s&3"pMˬx^\'5?r-AABE]BL.櫌y̦oW 2+qu29_];rC@ACQ]?LQf$Y\K\T;eS˅\NF.ȂJyR!b3vcJ.u!^/Tؚ|ѫ!:tg5qA-B3VJWكg1$nUWNWFNN 3bu0=Ⱦ:2\ooFp )`DW >N>YZ6x_1F?)7<~}u 7>r}UUj._o9e{lf0}}V7K9cC~P|8;dPlUOs/\TŽڳv6óIVB\, .%d?msw@0Et%&*{ !q {3|.?{%Yb׻m 1­C__ƓqsِjnոfJUoq)cu7pyZQݪ¶f'[;d`B*"CV͸[KMxT~6/HKALlX׷1iaըo]VRP_NK}͹7XzoFЛ} 񨷘2>2|UV>*J.D\:REMt2GeO?zdG|&KJ\lT6K ahtIbz'hU&~ [8f8c "{ܬMN[K?u&1>bHK;Փu8~QXS 7*Y[@Y!xJU0Py(hbhQ ]6Vee!5oe}v'}-ɇq6W[$;J8a#{zbk,uLQa\k XD(l R jX6Ċt=xjdboh #)di,f'kB !SHSh P*&,B"74bMfBIFЩ\xؚ8aOt:q_5uˈ;D&jA $^PQS "F'):¸H"M$ShUD<4)AhҔG-X8h qi*Bpk[vDQtK`v@g=fq{Qv=P2qu[˙<0\51 ;zmh V ܦx`t#E0=`-E0*Ԗb5HҘ  w:HHR 'I$͓n[҉KäPH0-&763st#0i5r)Z[A+4ݣjc/K2 N)%|Ę,on"BX ]| ÖaT㤳*.+cIa)N9DA{qo^Z;t+epJ4x{ۋ 8c ] E|eƇLU L+`ZKІit҆ 'Bk4͡ PXlZ(vpJPq?j8B3ֆEoؘ'V !j +z1Rh.;۝?icMMw1$ApteFXge3E-ZW5ംOÉ9OT$юBgHN 1.Y3J e4QnbShJ`ɘh" D$TRyIyr] Qg~I qW:X}yՕ갫 * WsމG/[,;Eyx+o|8 JD|0Vt˼x#Ԡ#׏ rgYcq+K8~9C<{˟FP*zm %u fq]T a8^t:YI}Z!y>3WHD5l/-ʫPh0;=4/Hf0peכf^=6q8p5 ӟzڴxW?wj}}ƫdqu] n| i4|R-m,ƓoYukTU`YɖD}dƚ\jj$[i*4IO؃,>.]g74N6j\벑jVJ.-6!,m$W,Q}~LC.z^Boz6O{z?jo*TN.l8o/Go^˛?~՛ǔ/p B>g<\>|GUQnjoY5nN ^t&oHPʙՔ[zͿepj6 gM􈇙pe^\~F3?] 
"*~*j4VbاQpd;?yX-M޳OH:e6q/~W?w:B~䥔?3 I$IS@@p} Gzņ%O K@7Dt8^<1Z}oMJI@)b-tJ{GiG YiK?+fTVB@tI"h%z"3N7M*UީiCyphS: !Jy(M\qG 2iĹt!i,#WH;f mH*b UuЊA|cBE=Z7r˙j@e˧;G9A#Uua1i02ʔd_+/'%)/]I )Ⱦ-I>C^iy[{9F>-wW/j͆BϳN"yIubd }$NYxcj]*z܃[~jЭă`$5Xtѽp 㵢6 ݁E擏3rTUCz8RXbP߃  ڈQ1L?ZFJ|fĹ\=9ډk+ VS#Ԍ,tbqHc2e5EJ9O;zvdg}vt E}š$]N{Jߠh i*8ꓧ:zr;aaN[;>saVD)Bȓb>QNp" 1Qjѻg:aL0ϣR9T0-d85HX]Q?{WB,0,~Ч43CjsIsIF2?XfU:RxA b FрiCʘtȱ1rv_q`#y:pl0Dz(;yh4Bkx[̤ʓ(4lrD$;A 0Ut󍑳W7~ם+ZakζZo+Z4K([L)5U9&,R+IS pjj$7d{+V|q~'Q;&E>fĽX <~ wār\bۨ:xSFrXBܘ|ZgM>Օ{fke-e|tA}Ăz TQMJG `3ꄷt2:^-I^r>Ujv) ж* 0p\s_lbY #h>W"\800/GIqb=:Gttؾ5~PLQ:dҍBȉ6Fm &e@]E4j6ϡa9$$V8m0㥶Lc1>Fob mM}MS ; G.Ig-hx l0ãCDbvx˄O'9o(%ʶIO}AZN9N1+@pp4aDy*2%5Z)0ePEWə""(q~[S+S#2\*밊ƥbPHʊ}e"@~($j_ >Ͼ)׍E#.K|k8[c ,TJ03ޟ_C6/ Y*9/ʾ\"C?~CW4˖A.<{_7Z|._5 ҄o ἖_z قZgDm;\FV7n-z!H%A UxM8\V0#)6gF\s6;XyFpʅ4{ª濫EY-ڊ6/tݏ72еٶH륐D|LyY$a\ Dl-m8)Ti{}D.\9w"Sw'>Q2 GjQ\;c2(P۔yYYG,3ÝS᫓jpJX>cÂO8 к2&[߶W߫A w]w_-JPKwuեrc˪;U`:Qӻ٠ ό'bT  {Tѳb/π\Mԩ?;`5fwNH Tހ?RHٸb PNzd l?&"S,,y-v$(#U}!mJ?R4{[m$cAW9*9NC&hiK˩#mMLn cnk[r 8 %rnS4Ew9lqrrX+h$ { 6Fv/~#i sÆ\*JnΐfGviHO!D糨Xkh}0cpR'{k[Lh`'4g z(';TB J!Ӫ '\qe`! kfh@T*C}骳 dtTDW=IܖQ޿-}"YO꼹 pOm/L8 dquXǮ;4[1%cs>g$ Q ?^M,ûlk}a2apYY,g;)TuFwqIre{%&ALdDn$m-IjS[х.&m}QhCSj{bYͶ^nͲChC˼^wix;'& ~݄9-r͙A6ƒEXY[m95١jC gpZY1M4^0!QQ`u8G{s̼'Jho/> ƭ`ev8 p~1~uj _ 煠BAI9[y5m `hn)1H-h[DȡA92uD8n$a^I($asȞJBypJZV#&-p@v옳M<:-޾S я\e,bG"E2(*s2,j@3FKЄj.WtLP= *O׵he՟L8Gr{[w;SRypfdJ@Ӟ3i$ "l [4;T@Zj?D؋DHk~#  ߊ` QrnDDkŘRƹFT+ii@ 0M!@[[);0(-9s,ď"A^($f d8%8X),}+z֊V9cK,1!8wPRiV4 սa8{a^zR3S0<{wD((2" LlqQ>/uZ]G!mD= 5uvv'!ԄO \7zܨaq1#'D6x!dՔו4&Wɸ?r)8b7q˻?(}:Og3ܘ OU-6[\`.pSE/tó"t- ݠ_0"U-q@swPvR{5~]1;nGD\"2vCϏfmwD(FS~Jɖ!?_FA.Eϓ>YJ BuΧ/̕]dIg_E Ld| 8@¯Ÿd?LIRyӻMrYs ן~?߃|(i..!v SG8MKS$OKoh2LbyKz҇6޿seKqf.jSVִ/$$II7XC]"*2_h?n)7͌f0ѿSur-s(X% X5u%~Q:>5sK| -]«ZXLʋSg)pUVU7A86v}S3ɔ[D;Q6_-r޹`Ni~سq[`OvR$/}'Ә'ҵYmz< fmμSN=_v*ߔ 瓢{|4Aj-ny/GF)n0vong$̱khT;~umn[Z&uݎ逧]Z*'==pR<YN&'ʏfqa(pp=`%N> Gax~xi1L dw.ܲșmUt`ܓUP…~ m0${8nmyW<GpoC)'dKky7L'jz7dX~OYe SϪ`*\ii%8%58<8ӧ^ %yJl ;ʭęFFM$<m$9[+m4U{GM _VD*$ϩnnH"iSUgJc'm gZC] ?m\6RE$ R))C -gSJFÍPsNtQM4WG#u`4wH1.}g!TVXe%Ơxj7F::^gGWd97gorrxߍٷmcL]lމiu鯲AɓlG+zf< ~? ~r5{f-t)ႈyͥPƬ\V^B8Sϕjgj0 D‹ ߝT uup5wdIYITA 'ø!(f|Tq5S#7e5QR{1_0.M/tAC<-nߋwf+:)2n=Ph q2Ke%H'DB<|T[ML&G͝yI20$,7ִb}מφ?Sq&Z`uw6~y\J %HܔQcL>mvn[.HKHE*'tҙ21ɜ[rx];d9JX4Z0+am,& ѣ J/iNY˱[{5eS&sJa* !K6τ[R:Zku0G \r5zߏJET{ 6 +$ky0*k䡨+v۫L|ێV]=uŁ )H]!M&WCQWHblUR7Q]RWp0*ɡLUIe`4BAU&WCQWZm]]e* k3TWp+$X9*v(*S +=;mgUb8ſbEQNkx9+"k>J>}Nyۜ5šLm;YN%#r}W / ///'|9 []ip/6Ksjυ̊מ߿.By]8Yo~:zW+i*.OG[俷Z?3^CKQP]٫8zk +fTo Պof0Gg--zTwx>EN-) q\|4y˻gB\%P?#//8D.'q6ks@X)\1enwЕN2*NN7%&s*;eV6};r7eu"f3ZQ`DqE $}$$@9'rV{H$hp\Q"7&DW=Y+ #שq! SE(O ˝a8{֖gS $gu|IY0ۓ \ϴLkɓ2V$0BzhjBT\f B=\cg/g^^[_QG3-RuulNrܛ=~kz(:̾{0$9aNw-.l/N~ŸB(yx+Uu?p70.@ć΂pe[*Dt( QB9ak9׹:;+w3KSߛ R W%ob.pl q9u6|EngUktېzUY{Y_k[)ն5~^NNfE/adp<vZ!'Ͽ;5!(MuOjj ډ 4qQ 5:YQ~W7 ]br:_/^_klmG^W7띟"M2^Wb$ʑ\>ժaaZ3,ocW W0bBgcv{&'kqTFu2U+͗I%B\q"#y`_Au.}3E?Ym-u?zPsCa^vȎAuto߾:o~={epv__ (z4 $h= ؊~кqШzNtxW&b;#b& s/zuws*UѲ\+mf5m1W1C\Wli?~B31UFWR!> T*z~Cm_mM< #ݖ{|Zcs3 tŒR IYI [/-[6LT^YIӁD!y;JY" wTJ"!N's_;Ft OcXH0SY1nuV}L\)DйM#Ll$PxR/<V*(tҡsR:%s ϝot@[]&l=-S (qQF%&E# pZQ\r!c,,KbHRO `ZZ0;c"x'PHG5" ۹1XclyA!SFHVI.bhwS7iq[7sWf+Qz?d[04`'y e ΜǍ>0 G2yM4.^ P:NS@#Ht&d|oIhv-{R/,QPqGA m F*| YJ eycksd91i!S@&ႩT1+X#'=Ym/O#qD}|=~qwd&NPڛE^OA\7=|a\Z a&hD5A%=QEzSZـqD>ި{CeBdRe p[I.g""P͘[9KR?;A?J|n}+ LQ{pE,Y ^[r\|$w$)NY]=9ܨz7-Xf z7T,VGL?.`AӃ\ ^Fq,,lv˃]a1/7p;\l6 4`9."bH)C2!H+r,H & MrMr{(۟Րrfoݐ-sh[DFQ*vN9b푌jۚ"\~>q W;$ ]{%YxC]I{,R*^e GЫLj0JBiС,=2ieEB!]3<>{*&׍e#/SLnw~M*P:.滋8o.C],r$|{?gT!VeP:Eq۸ȕ.U[_^soE]L1羖C>_6{_;sצ 5@uz)m]q ՒKu0d2ZJ-Z2JFZ2ϰ!HI?K0X&sJa* dgB [Ho. 
l~^-(lAრ~k1bz䟗ÈƶO )1qi;WW^StG5o ^SA|&FW*hh b[8m7'E3n%$|X[h?؋1BR 3ei!&jUd폿(or6CMr` 0jՏU{;\xGǪOh,7C8,erUѸ0yUaG?wA3jq1PǍ_ z%/:śb;D{0Myq%׎v-&z~QwJi>T[<,{Фgs3ep?ͷ:\18 =K}t({Ƒ;cݽ{M~Z%KRr U _C ?뉛 y@+G!8yAeIſS*f VoCx@LS_Zb="<sd&G PUr@FLz#bP1$ +ax%)r622LG eZ30{-#1FSҒ%v&N˭,rRE@ac3SSG ȟE {KUdtҌ c0j}32+n{8"Cx}EmFwF LQ?3ɡdfT _k}ϟ)CU6ގ>#-Ҭ4I41KCR/s7û9b};d?$\u\V ObPNM&|>r&9Р_|Rkyvr< ^_f}}ϓiIveFlU/ .rh! |l:Qp֜JP:?.қLT:/yo7fJP}A͋+$[ԼTr>$*fw7\΃Anw qa֫GT\h-ȩwIUsYѶ1W;z(mjS0a/haN+mϴ&>f*f=. .GfJɴB0J2Lx])"%J|Eir$ǽwxzw8LqXZc[.AA0L)TV 0"``JNY^xZOw}LJd d/|DJˠ`0"q$: i Ӗ)PkWTFGXt: =!"}:(oЇ@DpC`X,-,$:^3q9yoFq/\]X]LU{W}졶[N3 NG4&~Cʮ sZD)Z2li%%3qYOGY)y?sݲ]2GBmi "ZDD9c=Q)V蘂on0I^N@3G-v6_L87n~3,J t }hKCgz=3Uy%-;64q=NJʏ{>{v0橭!kwe;BlK*Y0R`D$4'mj<fOӋm4kUߙQԽbh nZ47ysft\2x:sꂟ:&6e;)ZN˹"ޤ-)QL%w5s6O2(sP{H2(sې .ah࢝[w[#ڌ3͛2g3ٗR}=U=bLDgs6"۴lN|LlZXpZ:s67y9h';m [3*\KTqBwg&]=st?M'MU~֠M.òASc9~j?q7Ԕqr-hL0麡ףTC _Xi8좲p=8_-jlVt6׎"[z+w(;{}%G]9hSEGxHDX`&o]t=-co=y__ KV_@՟ BU8*!+vsܦ2H*+Ɣʹm*fѫMq S|ߡӧ.;<1EDpaVrFaUR1]2©0')@91xt{gh Ra}Ūb,bGӁr(JJKaP-NP"D 5'^CInjqݷ㾪r/e.J50: h"=gHD FܢAR9YX˵a'%F@R\#Xj4)*sQvk)|ఓXofI&U Tڲ۹%JT 85< N5dy`~9 ΈB)&1\ Ñ-JAedx1<ʟLO9Ygyp%8$#GNP#QRccB*5u~<]^\'L;xZYg8Οr|^ W) vfJ:) ,J cHD{i7 Uh0f.CCۄz(ڳ#ԅO. QzfGN1p\Ƿ_(<~;3ݘL֫L<.'kQ }F8|3^D/tNjAP/Lvqo0$nP1Jz/H!zӡ^7S7PDp'p?Z3?#+W@cH⏋OxAbKb H[g8J (.ooxhAIK/<"GD)ŗ܎hxM i`nc*"\]B >9vIp 986P%*}66rR}Bɕe.ѺӘҼ֊$K"_.ShZDJ&̰Yg{KVq2YNhɿ7)\:e̤2 ><7|[JiȮ^6O/L x?h8nlU"HwPThgM{ :K9S4WJ]v3.m7*s?rA♱$I(-46})ɖwWBx%“ΰΎs(& hh(9XbCa{.QVt&*a3]x~xGM` lS-ҁ!h!̣xF&,QX$.uxwx{pg}<|cJ3ѷR1WAkUu_qtNTTQ7K?Of2nI\J-4s-M(Ml~]=_[<smykhxb ᷪoc5p]WȮ%l+WaV[_pD)Y/oaO,dKWO^l?}/_SҙeT`.x`, *mJ-An5&>DMri|n~Vţu$D ֧]俦\ORQl"!zsL-ʼn""<5[c5'$**RF+]A (>irzPΘL`ư1p U$[68C 'Av2z.~(ݞ֯tce\}]=|ߥ]6G!.^bQ`úGљ" "dۍD%uk.]Vc>Bű8Jm(H?߼wy\L~%}. ǟ_%!؅߃܎~JOZ+ %%]J. ؃,H![rl8ؐ;nY9{yz}Ƅp!A7m mBpoe՜^u[^[q☿Y+.T/{T3{CLhoǃI5ᆳB(ܧy^|Pxq `| 4r!:b*4:EP [5.xC&fH܈Sӄ51D&("Q19I;W[~[+XM?ED]u="x+1!2IEy伢qE" I#Y<+*Ȥ7N\D5\pGKKV]<@@ Ns %vK-q#b[yQ:iɑ2.{\Yŀ\s9Y%+"P;i"H9)AH 'q!o #z\<.vE-8y]<\ޢT`F Hu {3 &g]ĵ\IZEJ`7 3gW$p\ઈ\HpU\[zz;pgWEEc"=²WEJS"\I-SgWE`u>pU5g+aEJd=\AR@gW$lઈx.pUզpU"\i-3Sy-.<LllZ 7F{6ͺJ!1D 2j۠4\lk=b5/"mJ[2I9kǩ?{n'HfX`i'>q(O|v_~~w?yVhwt_&\לhO/Id(`l5Ӷu)ыm:C?(xsOgH+iX7ΆqlRuAO "-ukmV@bBB%H51ƨ YrA9":e.]ε}j+ygDQIj"u YH,[R'5*ȤVpM$9 L^K i zi*xkb&B QJuL-6^`@Ͼ`u":\'etTD @.?hEU ͬߠ.F0k]A G}S}V+ξjYVϲEztF3#]mϴ^i:ӊcbV偔mݤF|MJ3B$J%By8m׼ߚ:U㼎GAAA7Eo+Ѱ@K-%PD%8r%N= d) ~%Kuf*cV2%"dὦB+ = jX:g~-gGM :/O3aHj`䣧D1'n׌ŴF:#Fl#ݙH-R)o/sz/:C/>r!xIut*'6WC܊'zAdCŝ$BZ|Lm,h%dT39JҫcQ)aȻ`2,sܖۤ|# ~ZԾ.> ewtHNa6;z>j*ى4EY6m_\O?\LMd2ZE۔v#m!҉r#h_YR<~[dSDdO`y[K_&'(Hs"spmJh RDnR ]/<]-EcʎĺMB [ldc_KrكKflu~ekiD宯bO~m-zp|m]֟&&܁OSAցSkp'N26잺ِ/m"hhtC/Lz;fhwvBU{çu{7fef^[fɎVUCts9 ](epn=zGXj.5ǧ11/v5&ﶽJǐvi{6JSyEXW,jv.uHZD()E-Q2PȓErN/ӫ7"OGcyY@KF :hXmSM<#o?懞+# ED+,P 9sAd& X@E)9(.\fӖsx߃-7&y J:7D0 KO3JYY*;(țV I=3 \-P%oDLDVz9"2;&΁ufgޅ\߾R2GW1 t;~lyxAV0o /G/p3z99a0f ,$$X ΘĬD~zD8 "]^qAVAd-S) k mJiX=VKlzCFpeIiU6zx;ۃ#$L>?cD%p~/5x`e1/&q 19z(B1Sx&CVCovRs9O(D<Z1Fgga366Z68#O)t݅QOJ#qqVoH_(Ir 2AZG%*K5ːP |oլ:cާ[lPQ82O'ڞpl~R /9}øRDl)JS]ي%F;E0Sȥa0~L1jbNNgAj%Wo i+܋ysr1Xɪ,n@eWy?z=}?}pIyue=_?_6i"|qu)҉Nq~8_ϊ OƋ6rl<D&XB.a-tOw461q-<+>z\ +/EUm۝~ߕk\ۤpH#Ӌ/w\=ZƩ .s*W_4b)/]H~do_`xFͯ&)}F-nvOI/[mCW9?"bL?l&'#6ӏu=mx;MƈQ2&+eL^|(?88UG&8:YPBdSmek[TĹ#~:<ԁdEv:iɚ~DcH_GɘeY*9}v*36xo( #he%HHo-1[_܄_ܚ n~l~d-яsӨfMӌ<5豰`]\VhP:]Ik*;γl2$P h($4 l*pԊpصP8E9 ؚy\ FL" D!B R. t]S+0DmHkY*%/t !xfԕ2,u$D1m&t"\?RJ*qxJ7?bv~~> h?H+%O:G>(PV)TΉBA#x.XZ5D)$w_d0OS7yǫdSTmaR 90ёuoPf2jS-BcP5{bi*. OTC]*Yf6*bj%;Y(1.9YF<938\SC lFȣk$pޙbDP(N9[liDB3}P Y/`Hm iDy @2?ю! 
ɪ%JfckF1߂OץZl3F_KUoMW.%]LnYOV_9<hs9PzOاR?r.OtC x7"LϤf؄=ILꂌPRH-ޙ;5]{~2ɗ;{ukj泽lM];Ͽ]tyx'y--Q+1ZlfYY~εOBu?|U/Y*qwNjuE_4WtRFjÆ/>D?-6Nwz(N:}xzyώX7?_~|۟KE?=87pcB+.?$O૦G4m[5͛f􀦗LAg/h~e>F.YOfb Hw.??N?Ob2_w$wo-Ո[=9dȜ%}i8xvUI7>Oݸȝ5)2G"Q,"dpyjGF޺0k En%K{h\E($(炋qR@kDYB/kz'Ty6tB1,hI X} X؛'e >/5Fyvyc"6G>p{_wlBo{Cױ^L83Œ҆eLAj3#)E&f|>oZpt&MavRsZk;MBQQ@R&lT% 曉sߊ;>?=(v־66oy,_Y:l|l)??Bw"fPPN*):(udRnj&&yGr~ˍm-z3gMF/F~߷}:jev yNwG6WaaO!j쪡|wAgtX]Oqx?֖h( u9U S:+Ӥ5èL1ǜ,=Ɏk(lyH@a^H@WfIe ]Y\7>YoE.iK<G%_yt~z1g1đAR5ک7k- Mˋi>Rn4N?/4:r3!OSߛ,m8 b 39=sF7u\?ȄVu)sz{C}Rs M:c,;NCo3?yY7Ќ~~.K(D`2\ 6ST%W)c"tg'Iګ5hռ>dQIéa8oAvC`ӗ<s@7)Il A#uEtZЁw9JOeҩ$H3Jʩ*m?TTkx/|OXJIjvdclRgF%ttCUffodeUs+ȫ9ւ]MUHkˊA +U;3qiqeFքy3߀Քkn-Zq5Uz;qMtEw5컚j72$ W\YwiϸN]m6j+OO '\}Гd!]d3O.|I@ݢɑ 9Snе`z5;tLC%xb'D+ jp5JX ZCT#dM9\M>WSCT'\!#VY|pWSnj5踂NkvLj+u4W+%j/JWY5 5jpZp5J8t\Mѝpu2 sjp5jڡvj1*QPI+GZ~)7w#\Mfp4:w+ʡ,[ ָo\m%7>_۩=mNؾ+Wzշ== вy ^8P!ch7t5hٮaɯY[K=ރg+iA|rt=e8p؏*d>y6NOY=YӳZJ[ׯii%jײ0&> D#\Z'j n߻S+q5UpubozOq88 )jʍ3զt踂JOzJüpEWr|}=s;rwA ָ\A.j N|kWSe8]#,Dk*!Xd=]Mv *N:B\%%M/9^\MʫՔk\AmT'\~pr 'j; Sk{ufq \uE5ě;[Bw>.^E5 ӊ0MQ\!YׂiiT> "\ApSJ+i-j Tt⊍ȥj N\eX Zj ᄫ#ĕ_$}oN-<ʠ'\!oq-piπN7pl69CTvJg+ՔW>ߕq5UR1*jr.WLa=g\[M1q5Ui(p_ }bhx#J1gp˫b#iwÁg^zCAP 1Q36?(7G=1%_]]|UԈ ̹7k4&_] XIU=fӴC~R.˻ Ⓧ}b3i_|WS->7Y?^0~ڳט)>oYq6͏/g+o?~]XPE   ݲ03pos>33Q7 /34)#==<f7Poy?/uA/Jً Ը@ҒDٕNI+e,Ow{?>=H0#Zɗ\kDBR.71|mF2k~Ǝl)xF#wE5}p)Z(o BAzhst_T+yݥ{Y1J7V1D:y48Gg Ȩ./M(-)RRA5w%_ "$U)l9tm݇ki&%dQ,)NbZĵ*PN9y6ǖ*9ܜ,DE9mQuAgH!!s'# t!rD,+MVZMRRĂ`\ h.*u8:A0O uWcި%9; ,X Zͮć 00!0-#1㤵,DX֝`:.t`60o9cݢПjҵܭB01Xb}Ƙvl(sa5h_ժw˽AIU!BȾ%x_@1Yܥ1OuƎ;#RI@ eⓁl`(:\[HKxdF4C2 .p7Xa}F2YKl0ƻuSJs~@ 5AnU{Ce0p(SP|lAv)a$<A JH!CEBRA7ԭ.nQ֝N0t掳aa.;TkM3B`1i3D,6rl 3q/0Ug AG-*Ttg Q"HqbQ gE;!JP/u4kBJAA=Mu8Fg ̈j{ nF6s 1f^V}XH.ZĀA ~b3l,{Mh2萆}cD׊aGS-ldPgƐ z3/7>bA\\1oVztuZ;L'"B`BK!_"vNcg]9˚(?~]`m3?uSw# Kf3xW.\Z怋\n:UlDX 6Xg OP")rB\"/٩Xq1P\`\K$JEW2Ŋ03ӥe1F\(^,%+>òef:;dqXTuqNv#{QW=!V1#8&[WA8NC/ lm ??ܱWΐ'kw9D+l:'Xo.fHc.)r#N \IQwb*60? 0PR"4ep_v%B)挊V` a<` 7 Pc+pKbe6Ka1d$+偠85 <%2`O:`98JC6G=T3ʍڛ~);r5 t R K@σ'ds6 p_P6b8Lk BĿp?y#(ISjurp] 9E^Q0fI. ,1D.u,FR#,g`.ł\sMk㊯M0ucޘ2Wd٩35N\A(pyΤGa&S@؅PՊ*'Kܳʄ<5(mxK~.Cy-iF]u@ :v:Zs 0,PZ #+i @5>dN֞ 'JrnC)FRUgWNiJRS  V LJ^s#ގO{qVްaJs9/pc8io'Hfϑ\g>'1^zWso>Oo-Z\=lҮjk/rvߤ痭q 0 ?mo=.˗ t`_ߓtwu~7{ ^\x"v?C;1_];a] tt%JcZ{8_!bw~?In`vcH-`V dREF% VNWOWEL b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL be@O{wL%Lx*.hj<HzAL "&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b\&8FB 3@Jň @AML b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@"&Ћey.GB<&k0PZ Rձ"& bI.1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@"&1 DL b@/ ej2z6TSu^_֓ _^~RdwKp+Tꐸ$L+Қ/aR5)t%w' "UW 3N:vV火ݤ\bpza9yNRwk0Yr{ gko{N|=ϴK!T1klv|{O?f|nQ=Pk d4(ˮ.?j8Ii|͇:3ھ@^d|E`|&D%#@EG~ܤFJi|ȵAR9 TX2wJ1 T3_41G}VԴF!:UoRҧG ihgyFIojs"2ALs.y`5ʺdpUoG8Jhfhidz⚣YZ@i/-FiZZxK ^;#@`+ձJ͡Ji +ijǴq#{}X ]wJW(;`P|c+VC+RwJke1OGW(8+wR:Aph?"B=Bi=tB)2O ^\d:"B, \Y'^:\J\@rXnP`P\ǎQ)PJAKh!yDp믰\h+PJI;z7=;\$ϝȰ׮vV=nRZޕ<վCٍc d2~p 2Pw¯wY,nܙA+3VNi (zQg yu C%;;((v5Ҹ& 4Aϒﯩu wrI%v]apO!u'7شPoUwۊ^E=;`]zlҩ̯ivnc[q=M+FY]IR d-eV*x^dY6]+תJ83s#V٪qUMv D{I7yͷElks!ͮk~KrGVxXp0t<[v>ƓˋZxZ:o !i C[͍ªAJMTeq^gw]o ߙ{ Ó?'ع`5!]0hw_1.&F7^cVȵ2ŲrZVS#V-`N_Gq.J"De?pumK~3uarQ{iKxTr|Nsb[D&aV̫z{E华3s] ߚqc_x~{o9~kwvM.N'%trzs]I%Ɩ. ~G8ѭv^0Q. 竦w.L_iX=F)QZDJwrWe*+S:pxa9uj]TP;~C-p[ZMuϏϭ٦ɍm J"/.zK8yb?rr%o{t:9m׿Z~k< dڛDW㱦pJɰ|q52$,&4L `%6,#]AKv];~dW ;ӌ}PwIp%m:CtH2`;. 
v!3>5/Α690^s#_^|y=-dC($$Q3e?:e:">":>"8>"JQJcY}r&YOo59^>7ٷEh\IaldAA{ f;^> fQsR1Nm+W0/ $vZ㵀3)M&4c51,,VX4Jg&&P e,'IRBނSfrQzg<0z@L5$o~^kpK&0ؖRpFN ˍ'L:G>deq)9a\b* 2z!}LkVq0%:S14A ߚ&p[FT&'aMjY mU:W 늑-*#nW{l CTh0 caQ CfϵZF%Ȫg/M1-nsIcU55=Đ-%+Y{Wmib1 WTl [C+ P}sF+uvUp求e-j@B5כAϘaGQW'!;L{VxR7!ΌR^t֥ɆegJr͜M< 3[,;]E}&4R׿?ȋzכ.nEOpz7;p θd䒝|8.ILE|>L/N7kP6G l*1d|iue97O'CG *i[ wwQ%v1cܻ -wZ:;;^\@ N`rt4|]ÓѼ]חw=ԀN6X8KsPǿw?~y_?߼½߿870- "eG 0?zOhޡiU޲i4f:zЮb]vyK_kjr[]٧W]02ݠ}Y">:\˯` T?ܖJs";܀pdTpsmi.)7Sğ4F/=FdkX/'J52c VtIbICD1^lXB^Zϟ?ùKf 1g_T?zWZb )UlhDt:әP-;Ӊ=-yϾ;;nPޚnxӘv,g2ʧIۙ,2Ƨ.t^FBlb |5.R1$Ty0G)ˋH .3#KXQ!q9e\|6H*fD4<_u1pN^w <+4]YoVkuF)ѡj&EMNIVt0&M &u{m(Œ20͒k/cDW9RqgK:@(03g, y\ <~ i\BlCHJ&XXeNYg`4KSVV@t̥d`&8p]G9Qw2W[κVâ$ j(ح" ϸי_d9:f帻gha>YbSi`}Nڠy5&7(H *vrmj}zܣ{} ,]!"\N"Xc.0S9pú=dן'bvx?_gI%ב[p)B/wwY@+qwjnl[nl|zf4i4ͳ[ꢻJr[Z#yGip}iavhg+\;I>ϡ\sIdW\eDk#E)r^^8yp',ƞh+t/Y_ȉR5ƕ>TOxٻFWŠȫ^`<%nS$c;XHE%2#Ed wcHR疬{풱 R%F1euaWEr/2-#=.TspH'xҿoxFf nV^4gk#_6Tȵ6Y)AVMT٧BR`ͩ ZRؐ"d(,&gU+k+'3Βhgɉ,P- mX󭉳{Yo`OFYt_V]2zbNٮ,YdIu㋹YS(;h MVUVFsQR/(:K89=%x@kޗw7f;|~+@O/7W q9>kU;1{2J 4?٠Z~7t))×54X"T V(1[ɼY0`+}!-du^\r>t|Sj9) v&̉LcT9<硺$.MإT({5|"%bW9VV#D:cBtxRJ=PBcJXPdoJk=M7?nޥ"oLp :tRxs^|ɳB ܐ ߴX!gi«`]TN[֣!rcJ0˕gHm_ݵ%ncq4 \s;O{ `dth[/dv:PY8k"ģ70hj1m9v!(Lԥ`*JYcL(M$!pM>e3Mvy֢1^b>EfٹlE.ȵV@R512$ivDHJʸ<^ 7da̕<HJw{X.=y z "\ n/Ns{K%>Ǿc4֜IٰR*TMƃ6-GNWE]`1D.,;9VՋogZA.!`h/״tA:yp>ͿܤR b} IE/7i؛Qr>~!SM[4 rOMS?/ *EuU v1^o?d*A%paA-DH^:FW@:Ƙ<}yfhEjYBgUe]̕`ֺ@Z>uPVvˮ]O# &iiT␐UV)l,0!DT"TQi&q;ԱgD=s"S>Li ڂP] M^ˌ3].YrTh7 @ѷd)}aKʘ yYxGhdrHOk% ]h7B[Xn 0N>9{_v@^Az4bb*hEP1mPFgF&D GTJeGmx&՞/!:d^2B @%FKUkyPq KmSr))anS9#&.2!YpVzh4Okُ,h%dT39JA?f{&# /R9ʙ77h>;_~ þhSjti<)rZg'M'WIXP%;qr>U zbD&5Rt+d+G+rGqD"~EbslMw3~4MzͰ4d/EAkj:%0 !|PNu=ԓ|(ݧ:o_p͓EcʎzH%@TC@ ;kH2Y_HQ pA(yUadOluަ/}-8lkgs vKJƬr>gf~8MbUc߶ls4]Gf}oY-zew% Ae8{{ U鈶nP\]1kw]zlh r65歷wO9và﷏f!eլ6z~laZn_ktswǪ?>>k~!j}7<3G$z[M _,c͆;okP>*-a:Oۉx59MoSB߳?Q`}N}, D!IC!YR($IZԹSH)AvBLR[9C־ZO~ͷ`,aTTgBol]v<:mfNbNF=Yn;R:ZVm61 1NEK> aZ%%py=b( ɼlfkM678Uy =}ߣam<('߾5W: Ihe1# e',ЁipI->Ȝ-o$Pyj_nRSYA+Ykm9;hݾYz)ɯK% J!15BuemBBT\`3G3sN0|1b< ^dM[$lL81H/Μh&>z 4sƣV)++V⼸b}(Tàb ~;_B.m_ZĸSug/' F,S!0Qv%$ՖOZ~Z {Q "k)JI@AMB)-k` -`Db.qx,Α֊#mH85}jsPy)!pGNtZwPݙ%*kbOJKǼL%Ni,X2F|TƠbN2PV ͓ )vh2٨z:4^;朧s"D -dz0r癎J-4O+ue_?#Չ5zCUi҈$dxTRzAe` c*v}k,kͦ7*ذgmv "PԶe\$/dKx %^6pyFÀոRDRR  ⊵d%F; E0Sȥ!dNӁdNn'^ uJ@ټ%fYy8QTo+ɷ%?_f%nê|.07Q_IO^o$|MpA%?)I/(?~N&EGyUAr<]#,T\.i)4ݏ__aCv#Y㚷y+nXTbAV* EEI]~?K\̥sH-s-c&NΑwt+!z QM]M ^O8(v~zZ")SO"~BǑ<=tsdsYᐫ;)ܚ58K\:yd㯛 vſ4qӛ?F&=3r(|k'[HN,$7rH-I `<7`}42ѿޤ EMNYq^q*iZt?|{Aj>&iQMq ^&isWEJ~jKԏ^Y0gS7uG_u}/Kn?@-sE90=KEx]q6\e/X?={|K90NR ߆/jCo?_Mbpf)rd_H af\T K)Ƒ#F>$UbH.r}á_+G%OȖg;gQ5,7a5#.E6;w(_&!?ؘ~v݂_}ry￵a#:ݷw5L վI]DfO 88qr+7sWJ+Jt]mPWY$4dp3wɋfFj*9۾?3weXh[וQJ ~9;C&]UxspӉͧYGYZ[w®td|ES)]_nH^*-wj߷+I;٩6Y]%4]1 Dz7e["%z,Re:Hrz[Vq - Ono _ |v =W#i_v˫~, ) fJpOlG4?%b=( 8g7Rhcl]WFuA]B^uteō6C2JuEE+q+fq+=6JU14+uA]ɑ Xȍ 7G/2ܕR ]Wԕ*i0(VphKi]WJ 6rHW ,ѕᲛ+ܺ2қԕV%{ʮ S++=וRALu*1<@ t ҩ]몊V}u]QRcSBW꾡4(Z3 Scל>Vn ő-ܢ47m0 ]ue6CJ+Hz+*_we ~9;C@` XWu|euD,- ]q}CucO.d'xrDfx Ԋvg)4 YriÅEF5xkzbaGR`b?2Sh-Һ[[u$%F46GWKu]%Ů Jʀsv+-`h#+uA]"dG2JqI+JKUc8xԵ@KvgO[X`0eQ| O5M׆YG;(;SL|Iُ_]}}Mc(8)!er4YaH@3N㲌Ç+Mɋbh]WFIu%B#])0+MnF[3 jʒ#]pA7R\ ً({vI][ᶯ+g!⢟uWFKgWF2|Aw }>cTzx*ZU%5vg0W*w]7VwAg50Rf5.eV:x>Rfu-84kYedRfJrUUdA2#*RU`rӕk)pP0Qd"Ԃfjr L--֧2>y5Օg&p3{ѕH(#u]mAWɡ'sT~j9yWg?~[vzgϟu b>+֦}}ۡngkxP;:α_?4C#?@}6zܽy{ubIXܾվioH8zy0|ˑO;|rXb7{蔾w~ǧ~ ww셞)on}vӚG# w?q_n_[lf=/T$(yfQ!ܯN|h+'A+m#a(~VͲ^~Slƿy1 ,R^4Ĉ Ì#Pa\` k HRb|,;$(jw7˷/z_ o w4$\B(ВQ$ehL#8j6R0\2'<׽>?ϣ /1eCc6ddqiF} J0/4vl`H~]֘y&Y0k2(v!+kAF`ZI<&,ɨז ^``3jU`B} Zzf9bZ[v).3H%Dyn5efrlR?,d-QaZIyЮbIx VO}6K-ٰeMYY8S%-IuT Q;Mv(l`ы٤Wy^1M`=f͢q0)̫^a5AY[apU), 
qRo)ki9hwE@ja}iR2B{!hNGmRwDŽUYrA8g/X㨙&WӨYf y|k1炃^LB̚@}\ʬ]%2蹣@a&6kٛ2^?TƋf:L|z]koG+D>m! X 6.6$_2cԊd6)Eܶڀ-sϩ@ 3X "Y(gE7wآDy>ҫ ݚ:AA1 q04Ǝ\)!bDJpPR`gSOP br`fI^:H BРM 2͙`Hq s$e丽Bՠ+x3j 4GSm(S%mV\F*^eFuʺ@J @w_R]*>#iBիf@!Ѿ&伖H 쥈J1MȲZ>A!Ұ*4]j>\FI tfp;A9!MV藢H3fHNcDBmU}~m/tev\5t:%w~Ԃ[VkW F=F/>Hp>DFn>%u`8 7!+J$W4TR4P&S!yCs ~C|QȃVs IV 2*e^i 2]G4'*`P#Pۀ8n ΏdAX? "*Ɲ(@mP]֊A';=}*Enb6. O&!The1h6F4{إ)GhC$*Kt]/sqLF>5t1 l`=f AnK)AKcE@AK^>^AB /XC-D;*g e{tZl,4^ 3@Ф xd]\EQ:LMF"h4(=(D;䍃"2Ϊ ת ¨4,,{cFwMe#z16ȉ6łbL[4C:jˢ9q$Xe9h7 JxKd]ʡmM1Wds^ 7"Fh~N]x=D+Y w]z@ Ajp*1#K[Ƞ r@=-ZqaKh*TO"($'Mm2X{ikn3":|A/dZ8 R$ 4FDf,tXLݟՎsP@ 4@GJFdU}19Mk.YL@ZhQXAlR>ɗ Ug&dd, D UK.d砟hS߲NS=sP Qє?Cj-*whUPڀ@`ZSrlti }V&Eυ4S3AP8Q=ZMWHOQP TkCWV[2nx Bi?XoCn>jEro)D 0PrAvf5$ٿ "C$;)jI- YX m3tE@ޙ"B2@ߔzQ!7~qUպp $+^01];^x"_̫Rooa}$i:d.;zO?[TSL)źn( x).i6w-,L֗Ht{B'*\xb™NVёT_yAAe՗R,Lrtl8W+KC>kۘ8kEMqõWHLB7D^c%#\82ވg&E;^Yo N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@zN Tc e<$'*qy%`O OzN OUb'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v\'P8('vp@8w!J% @Z g=; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@/ < '8M;2N2,@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; NpqgiWo&_0RS꡼o ?|]Zu,竏0@pH%nc\2QIKR+6.{@(u@pEGs0ppW(Jm^ \YkW؊Á+ +֘W W/ V+ ؋+ 7C+6W҉p;!A]Q`ؕwRgEsW/3Z;Bp Eks+3zp#)~<火x[_ /OW X^a'^aц4p(gWp{르^ڶ64ں7rӕ;rO)JW8]nTDcjrXhn3Lu챊/[k3W<9m9mSFCtQ.m*'n}V *~+gEwV<]xO:3e#I-y(bZ?o";c)ܷgV\aP}:WuڔO7s9ͦ "Rjǧv}D|\z>ܴ71pken۩ƠGG&#c(V*S&d]S;WS&?HB,kTJjՊl"?]U0;WPYz^Tm1uwu;^[l^ZxG\"ʡ׏]^}9͇]XG~mM߽ ߦ<]t3w gHgGi7 u897rXjY^K^K@[{U[>s@-pG@N-Em)DѮ:ӌ赎ɛ椑ƕU(2#Gor otyWS7˓;q_-ޞbh}C<>0>OOrP\E C;+oE;7Z =ڔo$YX-I]V/]t]ޔښ[c,4CdUMDUtg]NYuǿMܯ}=ɭzf5y_z͏' E>nj'ECٌ*i*:KZ:JJQj4n֍JXpDz4Z8wӣQiG.^ҭ-3OWR7hk-?̶^"w%ѲZ]9KK\ޤ=4N;ՖqUˁO9 2y"wQk7W7@Zo tVp3-.H<~jV&AC"As HmϐdER 5Mi)6-o ̞V}9Έ$hp&Aij"?_왍_ttg(b>p-8/n>Df~ݎEd^9xc9 PYZKRn0FIZB^9r}k"Ƀwao*G0w}1|6Us~BP#lT=RJ#EszDbp˩zTt'QH\!_/ŵ҄45Zu&xEcbgIe*OZѸ;wtu29H Ǔ_M2쯱<(h6&@Cˢ蛶u m.O9"אLSRH}wQZ\r)+{8e9>f|`^?<eJrlZϾN7z()yZ.fozִ@]}5-LEsML n}>0"|D ,KE['ך1XvxHۏR^57:5Q4nF|h)nLÛNwf`ODx|yqS:*m6i{KE# mmiS0sU.zeiMcxgгzytȓczI[?59]e_ҴOt >WCQXey2~'Fn'Fhhf1T:\\E3!u3Nyԏ6&yשŞ1{_1q 2tWP"Ec+i;KC/47}x -嗶8m]֪Th*R~WiTUrkH]b8?fqӵ=p>>|4<_,WӲzI+`hY )[z*g$;wk^z-!1֋dӰs㰾]!&:.ftzW4c[=uEsvN֧ǞoFS To)ˢlZ~+p:@gu.KN*3+/OiڳOu*C ٫t'~})']?9]Z^DeLWCr8=i6W4pԮ@[m{gYoj{9aOw{֧s^}WR5mqn޷ p>PK*Ȣ'FI]\A5V6 _S{kgj{H_oPìwv]3β%a<쯿nh0v6i]TFeED>~'J+Z}+rpw^|55ŞiAALB7_~wlxߓו6xܾ^]g'sƱ[6hu}<]ݱ%ࢿE5O͖֦j_;m*}%'c٬[B].jg A)PQ( E*CFʂ:KtL&2!(L (q!P?[XAFyB2X$@ lb/" 4a$yғk*rԍm3رkEv^We+hP>'M^Zh^+{VG'˛_ܥНˣEƼ`L-r'JYwѢQ fdSQ7ZFW3e'Z K>dž;[,Ex뭠l{`|G^yzV Fm+|m11  I>T&ȘgE٪6#9 ^d]]<_v>l&>_lsA4<6_/ގټ#X)a9pJ3&EM52[[*+INVYKuzLVw RHKMTmvl[&lW',ކK_:ɑn٫C.IQb#ХI.""Ӂ$m=K_ܛtviQl#j t&u#%eR6u@*3L@/j/vKTɃK7V*Xʭc{V6d}:슯f 'f7}W|7 lx3Q\'GGT:1Gx-in]ׇ]K}!r?7DcJ.ۄ s;(RBғ%@5P*.*GJ -h+>ݽE$ʔ%Z >'A礅LKN-n[5ErԠbN!FB"rJ ^ouQQf<3Ue) qQ&U6$emD c>u懳^k%M F,'2 Xo짎D:[!jhScAmFӂJGՌ.يU!\Y7cUk'֪Ԫͯ.o.OTɪ)gy7|O-֬=HT+?HjfRj @(Y< (G,%GIÇ6M0.M5EWr.1QTK \.>(c):8l-#5:VR5c3r֌J3]،3vՅ.ԓ.{?NHW -K÷],bxt˗6 r8/(+϶Ҍrr3HPjlhކj,Q*Yզ.YN vl =ioF."9lƎabEk7]m'|+/ FMdFrr`t,mw)2-dFc]I$+Ib YG!bHlT >lF79U1E#6]5kݤ'eRI?jM6;9lj Yd N4΋%q6dD:PiY9&:oU3%-SN5uV<?zq";&_g3.Q/jX/'M=Rd! eVH`0Zl0FIGs֢f1})lChWTї3Io l/[Q9H_'Y[Amtno,V[( e"*z|tBsAfgB"S|6!yplG=5T6',I*I;N'WبA$jlET `n- Kg=洬q簿$dXbMu*la䢖J֩Xꢥ7g,)-k'!_/u4=GБd}Uw#>V*(O-] 픒c lBo:\(baiHČY82?`>E-]lT @ yp!\m׮AX,,,XЩV߭W!DuѸ$"%7𾝤r~rͯ`?C 9UL7+qz-]G0BQAzU??,MN>%rq(xI>=Z.,7x>Nxsd6?&vsOiGF%+iWΆd|9ȼӛF/<9;pfC*U9mi-#uaTo]GYUpc~M-?7;/cf?:?}>?w$~|;>q;pE '_E_AgK߿`iji^o4g,!: zXWKEB*!ˏerIKu B3n__DW2Nl]j uhU.dSIkPMr3؃! k csNqw6C?n{fr̽c6{s>3O1GW-+ 11]diF#HjUԆj'W. 
}w}|@6+@D2g"1: >kIfK' dh۵ C:f“?Ӓ-r҉~&cHݲ#w+r5҈l6y-mS6ln=}%!\Og&=9"6f :z%tdstXMI>ρݧE ǛGX]TP:F"4veG%5 ;؞B;9X%쬰VGqR-R!t"KllPjU&j)dʮ\AK j47 96#Rwqy͘8}xqy늈ا>C^]:^^:^1s,(j/vj-Hd`SLκ: 1M84Ǎ1}سV1-fNg^pk{>or_vYmv59d5[=*+wrxi{(~.[Oqt>/ւ g;Je&^&*Hhl>-qMy3<09:-j?%w ꆇ7*EΖw.stgʂՆW+EtTֺH7( S)@& 2!Xd5Uc)>G":?{gǑ$tFfD=l?,}X1e(#Rєxtͮ VdxLU*8ԍ6Yqvw37c.5o8]fC@[ѓ#5CoaSwy)<)MGZ-@D5)8C.}kmQ~Lv8>$lyPmqtLZ%q?26ưj}B&A*}R>*uܟ1fn0$) b`kb2HO|);` &{H^:2!D!fv]L^I IM MNK )n̖B(] *f̋%Xk .2M4zn;˜\lY-'Kjofb-T-T-7%+׵9 }Ws֥ӏ7]5-5Laa6BEpm5mq2} JMrMևpjyp#.xKLib4cq3i ~~ݯ;%NXG?j: 3It=5$0(F:d2B ,O=Yn Hv w߿#{.9 ֯XO!7i&Ԧ_\&QՂպ~GN.>|ךݳhɷq3ne8Z_9-Nx}}O{; %Is-O^֐;X52 "?n'GOkK>z~9z>yrIyx}OG.ݺiz8ra3o ?>暷Yع2M.L:U?ּCݭtiɁ>2[UL PL4efz&7d '҅:10É\ ^|BCQ iѕR(]WBR uRԤ+'\Jh}.r[u5]%"] p5b\jt%2FBjdTt o'rX|S1NjW(ͺ\|^?g!gzDN҇nUOL۷_됯"ʞדX..n墼@߇bŅ͝xs0=\xCKK:|=Co3r.zh[vtH棣ƣhϧ\8\BnzQƐ Cr7/2ŏv`/uo_|O x$GV^ːN.X2.Y_emиXOC{6qn>K`*97D7~u鮯# 58oN=Vŏ'֫bp8=vWY _W~'; -s+=یr~z<\]?W?Z":|ۿo3./. MDygp/?-]nHO!Wj3qg?FުZUsn56|gv^׫&=Z6u'-u+jH ,;-LжaO&, P?j9-C B;ѪLP8 ؗNS] p4jtŸbע+}qJ(,u֠] ] nT3̴dRJf++ҕ{FWMТ+uw| uGpt%Jp+} -SMP8=ѕN?7?_WBui=:gEtW\Jh}{$FWsUr= lƖDWBS: ^`˦~G~= 8<\UT[(K6xK6 CY%oKWu#P;o'OhiD9~3)eGyMP\uWbEWBt] %A u0xDE=*ޕ7L,+,B~tkc`FWN6`T+"|] pjtŸ)$-֔+Ī9S2t%:kJhוPn$Bt4EW 짮OJh)+ u}UCQ7]1m[֡9*䝦芁ѳApբ+ J(V]]-d)L* ԺÍwhL"++[uԦrYW$ 6׍[f+AH2jJܲ6v[$Yi7vSQ(q# xmdEJr^9>m$/[X{h@y̸^3?whA -nZv&Y \Zm]tV] mt)@ um9yp}Ԣ+M)+2a;$+NFWKF+B<u%s)N8x=cWkѕ;Y ,J Xu5C]+&GW] m)=*ZHW l]P+%Ң+ t] eUW3U.HWɢq^PLA3ztlzwe iL=-M"M3pCY]iZpiѴT2B52ǽ4E+T~t_b7Ѹ66mEr%Rn eFC .-@;T(cCCz MR+EWBK̅2 ]+G3(h& J(K[Wu]P5&8z3(Δ+]QWU+,X[Oc4˹MlVC mt-]mP ؓNڗ^"uhaC ,LY] G-TH(SZUW3ԕQ+Jp G;UZJWJhCѕV.V]PW1D"]10:P+%5͘lѕPՎI$c̞>gUvDifj[ &_hM.^]8l8{}sF^jџzd /=Jo?tƶ ha~Lz׈|usAJ/ekؾ^]qq56`qˡiM"AƊ[ W1/wFήqfUa?bo݉?70L!>7Ytz?nÄ;n|fHܳ}kfBv0>U{k1PDž3#PQܡc>O薑y<OWL`} >C,/S\C=^s?wƯ nw7j^9`7q{jTQke|z]E%iS E2i+5(mQ>3mawić9D^׿,Hk|@mu¥hU2 d%P-9&'RF]l22mf&(AU'\P)&jQBr](cZ"۔QŔEaԜ(6槞~QD~ZhHGM\ DA@1WA1Q&tI%͑!j-/dDj"B>yj-%J`ITB3JpMVdl)I%eGѢkAӗr\^ެ!b5crIJ[׌R8* %e`Xh!`N"4=XzBת#16otʪHR,-u@DkH}yo2dYZH[ІiN~;WhRѤ CdJ Ci>劁Ƭ2:ּk94Yٚ,0Jzxx/dp" A@xh艤}U& |ReҖt  ,R4֧D!۪y>q.x ҉YUb9c$xNTehZ\ 2E%d:Q2)0iz7[(c} &G1Ǒ֑~D_+#eІ備)ՐR@(%!BB>im-Uल8DQOJ=i^:gICR [AioeM1j0frYD.I{V RBvX%JJEv)I6$2S(! @`)K-OnEAE E'+9+0O=4 o5h;KmICRTCl]y ڢI±D6V:؜b"uR6 ]K ՐgGg*){uQ8W!)Mٰ0 Tʈ74dkjAt- ݔ` f"\"U+V#IP2R6LHpe ؒd, |DEx*dM'(Mrʴ] Ge:Ϊ"(X=*z]C Hd!fH57]T? 
+1e4 Da(!$( "*$" m,JdM%ԭ))ƒ9 N0tBC\9J ufS.J@` e ` I&- pX3Ttg2B@Q"Ł.0͑FA(8gI8w@"=doH_(dT┒]P]لTeFTݓ.l{bc_2Kէ Y D:-dJ)FdYl@ $DOnwiXs6jE},|:omBq!:~r@w@{ńE~ŬQ@r|bL:t}8!`3"vؙxcW7]˚x K@HE&rZ!d^1P>8M>)3]Kc4-=ŋ`C%!YZ{[{#fH܆`m ][EU&USY_<{UbΈJʁdəPL)q*@?$njׇUߟ;JK]`&Ռ&"Xj5YҵD5 Qte#Kqѯk7F!t&% I,ds  "QXTDd*jQܦ.7z\+hh0qfQ:Z 6Q\Cؘ5EB bjSѭ1~5 UyHY-:h4vrPߍ o9-PpPaRD6HmJ F 'GO:)r]gd*"2 % )􊠔5z6m6`f+BJ1$N B0Aʟ"wQ^0b zYQcԔ*Q f<9 T,+~x.]ڈr!cS;SvV댙+~Ҡƚ4A*Ei J>{jgR &C@0 >Y tStrg^4Pcf1 *";i*C *mP zxpa Vʣ-.(-L4E(QBb)AH Gk|L$IkO^(T}J*]\0@ b~pQi04fc)kpUj4\.CC,2c4&>c#T$ 8uS't\5.7Wk]шwTu`B{&r^}!붹[6~)6D܃ 'ykuμ[͛ 1s5?'dyQϡH+7Gm_C̾_%~l54bvXjWtbeoҫ[v1_-w^~лGׯrEh~ݏ}Yf')Hp}8e/fo K75vBu7{WKqhz70~n d'6+I tWIHX{FNسquN ޸zN 픔b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v\'̈́sr2p|YGK;:J @V N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@zN qsro|@Kl@@kówezN ob'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v\'P  |@8?w'P:N70,; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@/ zot~}fݗVÂO?@se_w?˛&WFqlKFj!\Kn\( ы0.nQ]u ]Y:ڟ ]uJ>w(`ztz] }FtQW.s;o0ˡ͠UED}?Z#}1QjHWAIKWss8 JKA՗6|m26|]"RCW_REW *0]=ul}X2@^]y3 fNn7m-\n7fH g/O}; s%sUn'vtχ;ܠυ z4QL/W⎮u8qaEa1IͫM7w+nWez-{_[(׹~Lj ˺@N,qS(ȜҼEghWsh *æ > v߯qZpf#:ܬ1> pfN{D/>On;{]n=b},u,׻cj;'Ok۟-_!168g ;=Iy'?zaU$oK[ ݰo[[LLj2Iܬʐ?R OxsہgZ^fNxzt{28۞ػ@8NϵŐm3rHwo?q;eXc[۬n.Ӟeㇸ8wem$Iq.+ȫY Lcv{F[&%`FV)RbJv56JFeED~ǝ]\F~wŶ%Q[>ZWOt=V·\-Y5>1[iAHYcxq^g>R a66^R\~BK0Q9ND*ιYRKD<P6pNFZ~aUPրl7G>,lFHK^őV.T3Ե$UbWJT)d:TWzV)aU:RE.Hb'E98NWT2RrvA4|b\Pg*yxϕȉL梄FY& n8r$mdBWN<;jjKCT"Oz#@x?)'φU7nT_yѦ\,:³aAFQ[|=+xeC=Bpo fT|'DnS1@7f:K!y};_Un1//M$L@d i*5 ^ ^^> < Չ{'XnLsM5؊ji#ExrH',Hj%(,m ,FΑ}IJ2rGOZʥ5I"H,H4ZGY.Y/F[]J}O%_3!?2On<.*01n:].pb *#-.\ys*:T){;!hUId "VviY^MO!zvE狨ɟG qe8j%x=,7fE<\"  >cj=|( ֌84a!&d&_guאd"c&CP(fwjÞȡl 꼾a\6n!ٛ'vyN}]ڞiYf"=盉ӛ}zV!}gXlxΖ+{GL5ʲ^xqĜXr*ȩ7sIkG E&SWm6kOg(n1S:]ptytՌ?=.ty`_zBYȪݷOf~|x647Flg9Vr< 0og;jb˸}&8Ě_ ?{ZSmyMoam{o[m3xrB(iqǣɮy'NF\|BPIA6ĝOE{~ JLAiAi!?ۿnI3J kGnT! K8"q/^38:'%t#M|6 ۣZJN?˔=>ir ~\]>=88_ZO|;ArL'X"YQ4h@kdG2~v}E~.{A;܁LrΖERM/+/>+[Le íK\cIg^>y+O9_d#? 3u>ș# /q/|J]w&W`vjMp>o}VD #濾d&Z^B2Sٷ/C W)!|Vw6[LxD 啫[R~Ff{^fcmW, Re^pm1Xʉu눅Uj MkfE o<|vג]}ç]:B Ꮁ*d"Sf-'9'B1ɘ"H\ ߯7R7}b7o9p5(rlح"c;y#wVo'S%~GK<)`x}$*4kXZ AR^9SAyAsgWz])yBޯAy#Hsȑ %`5:{N".84SB1Iø-0b1q]6 &sEG4E$1RD[0:.FΑ] g}&zSTӠJ>&c6qW D</~;goC9os7y={9׌sDG*9eL"QFF\ pKo(gg[6bb^KJ+cdhǕP0Z9J-Fj< D68Ҋ8Ҧ [=5&+["G(}힭8pnr^5nRp#8('ޠ1'$84)\9)xf0N3hoN8*Ji8G@~ZĸieuD {4O͓u#Okw/U*B&+Ny8xTt^&Fka݆2{,(2:DԳ@Qin=yieoꈵl>vR,W֞Go܀OR|;N)g4$\TZ{1RqY" 8lfN͠6#*1j`BF4 ΢e 'Z# `D^9ЧOU}4ݟ.rk{;I>mNo邸M "ԟ4dFMf4'Aό=*OWVfiY0/y"TU6P'0O4).)z} u{YфpG#SLID4 ,g{.nsJ Y6=jH*o(:nh!.in1H4PJ[`DC$s{b O2:Gajpk֩=2 lc dA6>X:FDΑFDIM|(:I[QR }t(7`Up181v|V(; QԁkQ|Do#T=['VdlWV_UiZiyHR gszrxAKV>15uJb]7mܝyJ5%(at\`l54rg2*+DL ъ מ[kWD~^z l(rMY'8฼cA! h}7.r;׫oNTiU:^Lc!WjG.cWa&K燌:׫e2ra(eBy?@7zV>v/*ha'u UhwZZu*M~0@3%bǯztvP$h#u4"b`Zjk7B@U3OMLq@+*Fi&ruHY"!(Mj[#T~b߫GO61; ,qBFe,$YBu$"V$:q L\FJRE520D HGK/$heuGB\k҉*_XPsXPT-E.eg}qS ,q*i+n-'o7vNya=S]dbHK~Q|8z\9ДPY2TSͬ^EMG 8ENjNYfZs${呡>scƠSbT=lB%UZ3#f,Ubq.u! Egon32Hh.nUiK3Yv/~3ٻ6r$4m>d@pzd,pbK^I_P,Km[J:$#uYMV},V}:=}^'Fl哔)\TƲ%.%| w֤ /'&ORV[@PȞI66AdvDjș-#vk܏~Џ s[P5Xu˨;<دi<3@P}r8jH^I#s!:TC 8MP[W*]$C&ϐ|#^LĂI`8FǬ" DqeAEmvhFH-ȉ#wSV`22CY!0̘A}Yٲ&ΊvnxW.?-b&3C>sA+Ј[# d ނf4ߊOz~f/N* :RpfXQ&U DZH NL$VvVkVu:cȣB.eX5֔Eq%LDIب2ËqG!ߜ2%howidP7,J0q =ޡC/JdTД棱dndF e+I>#jd P$Ql>rVsZqJn(x]4F.g\lMAmlnp8WgY$죕;EГfcC8>'S:ĚU{kPđ$]. 
Ʌihչ߬VpJG,q9@o4" }L4SiǍq5wN2wd@om$fe ӏVynȥp}y~& m琏XqK>ad}ٹL=Gy{t体c=wTN-zp_pj/Z 3y 3G,b%ϵ@%L$sRArJc,{ BQj5 6$bI,RȼKBR1;-{~KߨؼMm4s\ {B~Iy{=zW]BtV 9e>rcT j>N4khL1mJH$"}|U LY,Zj-IpYp,} 0'blBґQڎmM-N?`0v־j)&YؖY>jf2᩶gmdBd̺悳5.XFr7 |& רeUgU]f6>ƧepXݦTpq[~f0G*zGC,6 (ZT_f.|7Gv*n&b[]4i&IJS n쬙|3v/qF'SǕh\V}ᠧ`!]B^ۗ;'<fn<]{MC 6L7_hfj_Lf5ZMoƗh@ujt9t卪LWѽ7u+r8ۏj:޿zes2w{chE)ؚI#9kI5G`;x|-)ў2ODؽBF[%B;CrYLLFE;y9v¶!`ɰ\{IA3fd(в$B2N,C$cFF_ug#oݳye,u $Q20(LdEK-lHXLZbB0~xyλu61! xKA-S 'FZ)IBxcϢ1m;&i FWe`UY1/422kMIZg/S5CdFx ]CZ` k'Xr&9 Ypi>Ȉ#3~WwJ^r--+Wޅ]]|X-w?e&^YB?#?jtt)btz¬òJrP9~muFC":r_j)uS_dC2x_ ԏM-QYHD3L_C2(/1~:_4@ZK>UG)y^ ED57~~AA?.s灋)VzS~!}.\"Ͱ}ZvV&XdJwQ~5Ț _muI)Mix|Fw`Wizw5fɕͅGma,H{sz'oG$;"wY dIKҖ@ffKYTd1 >Q^,eѣ6{W^N-XVz\w QL3) +R\2NoWVXFjwqAi!f1q7jWy?bFx΢Ibm: &ق8J T ):5mCMʹuk^yKj I(/nv(`tm☏ͬRյu1ׂYr l4Fvس\%&a 9 $Dihc7U BQZK\(k%xOB\6NE A+en%4ꍕ*(Ow{&r!$ЍN .‡|Hw3 X9u(@i|9{ۏmtdu>OgWZC+SHC]j@cj `kh s?ۻ|sp+[ TOEM*tQh!p h[Q :T?j8Ong42(2A[aЫ}[3-;6#_n.?l'|8/ᐴuDES5%z٢4``W5|ztOMOG"\JJVi bJKVY-u+6ʈj+51w]1 EW=/r늀btŸi]WL\UueJIZΉ1+5+l%0+uNGd`芗@+4eEVu{T銁ͮ7)"ZBbJTEW=ԕQ{I"`|dcq+ ٯw'[t]P8InTZVgjgJStG]`\ԂtEYr.bJ,+10V[ * wJ []%:Ѻ4Z,t 4փ$٭a\`pDΫ޺?Q34MM3.j)fZs4S,uĨ ]1p&P]QEW=}+btŸKF]WDپT/ꏮM[!\TNΎqӆ/22꣮ ]1WbtF2U/uAE'b<Տ+5brWL\"J(ꥮ$$`ViMv誇ཕ+(GW]mT_ 2EWFWzҀړz5n׵hFWi!ٕNЕ.zhyIkLkMV+wiAh=5R3*OCk+^+7"IᴪSL:]"+\(I#]0?PqafwYRmт2 @LfqY Zg]X2 =,hA"`ZW+%EWL:w]15EW= uŸQ}hrSrߦ2n늁̦)%EWD+D]tC]Y0xA"`c]1RtEVuŔ]PW@I"`IKWLk_ǔ.]PWxp 4\RtŴ!{]%涽GՓ*蔤GA U]ގ({]EW}U+ZNq]1>wQ|@ѕt]ᎡDZA2$zvu9$Z;-Fۺ+L]=4QBb&V&&)0Wn]͟nAY1&\@'ELm&Jtt5lkq+2bUu.銀+ bt.*^WLcUueJJB+jK4Jz+뭳rW최[ btŴ_ 2e,>yRtWQWiw]%T{/uAvA̝A5&\"Uus^RbtŸNLiC]WD]΢'U*z'HW.T;]1mvPtltev hb0 ui#]%Qzٶ&AWꡡ XN*1(MZZ+&Ԃ(Зٱ~ɺaF%R*$E6ЗʩLd~9:aIԭ]KK8-LLhhr-'E'`&*jI%;F?VjqZ`ZsO-0e4%*$]1p1"\J:]WLۢ㢫'ѕQX#HWlQv'om~vE䥢ʂrJ 9̮6b"JߪzUt]9JK]pHYô_ ev]=<^8D1"ܨ@VuŔXR}U`$]#FWZuŔEWUu> 0>ttŸ]oGF볟]1en]+cXr#]i4'5htF EW =h}~caD{1uylBѸ 6 HV7M3k)MPe+HW ]1bڮMPtC]Ae ]Sԓ{,]1s̴A+evG]B4tEə]1nRh]WL誇t+51w]1/+g+Ϋu#EW$uŔXrW}ԕw`PŠd]1bڐ}(*}Up嗴jg\'fvŴ1T;Q"Uu]D/i!qZ+z7u`p* t]O*;Ub%]=4u5MeTIjimMxȺ&۱$[8*Z$H r{]r rUxR"/ZDVHuRex&Vݏ@U~@' W- N (@ALjqZ`ڮaJ zZ>FJ2L( EWLkQEW=2# 81bZsS>D] |O4\simb[t]A"(FWV֙uŔ誏rQ?m}uEZ+1bܮkW1e,꣮|AkAbpRtŴ})-]PWbJL3)_xdv~}+^Ttia{;lM_~Vxb1MGbjEoßufDbZ.CoMi^U05l\;.gwtkA~j.gͻwn4_EbD__>娱 ~> m Wo߱j4p-"<=ђQs ܄7 _.߬:9>bHQoԤl׿YnOWgg~ök o/ S/.?z۳x>٧;٨2f" ~2bSLJbLŽcubxuy;ͨF.zrjtp: BcChg;29*:m 3y1d#]{㭵GkHӇWVq!\qaZs”A;.=B>h<:!o/~5Ėo(\D{o<`0:8=3jmM0?m[~ϷΨְ̨ܼ4X"ͫ^%o}ttGrӍGm̨Z)]QqFEv@uf Ϝ6 -3J71e2}xkA G#FW@Li]WLۦEWO+e'HW $+-EWLCb-ꢫ+F#yPƵbtŴ+%G]yk4"`iKѺ_'К]GWA;}7ޒUC{K߾z5x1Ώ#ӺS\>Q}ڗvՇb1yx~ {xPAky1_kq.ڶ&<[扖? 
OCki;]_htAYMxW/OOg>oy}-x͂^^r[g[\7li!wTZgt2Ϋ'j;]~׳\Dd CH:~d#,jdƏ̪Q*jjTѶFiv`,tf *x^?2Ꚇ_& ݢy} cpXx~yDO/槈^3"Zֳσk$׏n5kOԠ'ǫ8^]P'я~~~~׶MA4sҢ Ӏ3SMܘ433ZA]~VW/bzQʹ)Vܚ72sYwxk 4"e66 eև6/g{&赾wVRmۆ/W u{4Kx/k27Ff2媎ZDOR~1w7uLOgުzPPO}Fe4ևٝUIւzv߯>6:'}6B;LHYŏ}|'7{WHJ§ :gL6 zfI^ҿ!٢Ld(BBSrEiێ)c')~*ТЊw:>hqcpcth NT8r:Nʈsz2 0 Һ2INUuyj UԘ>NAR(qCXonl)50b$:uSe(70v\dN>\/C'*S1x6A{p1>¼W1>WZ yNc89' 4д:aR$I)Z$4YZ)E,I~(=ˏ3Wu~R?['?˲%J`Z_(\h^yzEm5\wg /"+ Q%^  PH |\rz6%"d=Zg MLto21!LNGQN`ڴQ(* Qt…} !ܗpɞPP縵}HHS+-éΞ{;BpYN'RDNI">nDl r_\~HVV:c's7r0/-\mB9{ht7 '5G _of 'X^Gu|NaC^wE릱坚sǠXVrٹ B}Lq?АtFNʔFnD֮@vز׷ v,&]&؛>hT'Cˉ~SN|ĉHdIT/%"OxYϒ$IFIrX $e (rc/ޔh[k81ޟ'XHĦo;ib({P ܓQyNЮWebjЦ*D4K `pT\9\+y.8o9A2+))S2WgqE:v9T9f6rItonxGa)K83WƉMй=1j[쳺%eê Qv+?totMBY1.虴Z5NDLJ,@DQ 2,ҌpR( IYpkrYe q [[e+-~RU4j&ݷSPp!a1uڛ> feH5&Τ"r0Z@Ȧg2tLCճcPcr0 Ȉ(׬B4&Sl+o$sgl`ؤ6̻lcy!ח@ƽɟ..qR.tvϼOK+\gL9QrYSaNdCiFV(+Pd/!y^~A}oW^;jZEW#q_e ꭢ:L8!۱ٗ~C-\0N&zCׇ JQ PeǛ\>9!HaRgR`\.#0}q1)hPŵ^hMj@<[tE3͸:ץ )R} igeJM~8m~wx"6{JP#wB?B#j uvB؝O"*8}B wt)=<: !>e\ 3G07Seh3^V,˛9mMNVWuUgBKpcˁKyĉ*I'3tx*JpKXg݊X PHJ%GN# H6dd<MV@+繢 ͽ =Xx,Z0֢ bސ B ΌۇJ h#k | xڑef\ ?Y{GBؖ4rY7e B.0'}]f>j42R]K"!NZ?SSO`Yhˢ5U~#BQF-~h12-6aL,v=Jtĭ(ԥ,O<} gB/E8WORqLJ%rKIXޏ|R,\1ʢ]Uii?[@0I4mcF|:` n)[œ^~0`m^1)Mj' 2L}F (Yp>}IBk p?c[JAF*nf4M_.O P?U|IƢ5opU K)0Mg^}*𮒈UZ7Nx' 6#/Ib zA76Y5z!̓6.R<)wRMP)|%1hzssz&1W%] yu0㊡-kHKQlɪ̼[,?>A?6N3Rܾo#K c7DNluќxl:O'Z}ZdOߕ?~A~].+9=V<>MͿ_}?&wO%?W^_׷Ts*): H"9\$@jHR'6o{ J[yTuVUdYXx(6O+R13  <9ʒ`x~l^L}7+rɵӈ3B8]L (zIH(A 1H7 +IFNM( [~ 4tmH^g\ޞfe*N2E&pS%1 V\oL@@aBDDHYVUPY"wY_1HhFܩJj_nu%-*[-\[q=g]vW,݇_疵=Nͻ.THͦrX]IP zXT}ɖե}K:Z[`_ a2h}' k1RMMz)-ꯝ)K!l1[wrH Ͽ 2QY#2UQqwTQL1(VTe3T̔S[s3izk~Ef[Y^( &D4C~$?`P=͙vgDT$cvRf)|'Mkun+x?T-&>z<+miٻ]`>I+H \JrU|_߱xb}GK/[i٦Yg-欭m&m ԋZ+ hwRc3([V{E/= uL&N{OB)#mVT~ *.3gM(;QkDd`4Dhy&LV=,o*Vj;.TJu|LҠY9L I\zF;.U)ͻU.]ȰLmd!]΂~Q0YK>7lB&ڴ:@8*qw K'T*'T4X7W4\S}?+F;۸ɟWc}(onWYIۧ`4λܺľ% ʘ<㝂T{<w $xO(ihj։?S,%~3e;W:tN\yY!7&!7mf/`f5zCa%'-vl|n0Is\!((Ƿ 3@HU;.džM~ @TQ@8!fLnUKx}|Y.XN{ N(3nqĂe `c4jN JqmQCN"*0HOʠͅ+xhBf~M^xAdQax-(lt;8;n̝^7qښ. oDE2 ٲl{"^?$G0GvS/j\̫,`|wbm_))S*ZJSʐ:vL1'E-WI>6n71QҜ >.POHҖ_1B ^y?T)*##cJR) p,)v{K\DD)2 o՜w;ñm9ϊ*k 3!'L9ܣBSUnu8r!)^^ItIm<`xf"#g,/3x~~{(I*:a<3G"xruuF"Юi P6 1 ҫVrTaw0Y(ޕcrя}ylt4€lRC%NlK)ɲZPTGGߑe &F{-ى 1=Oe :~ !7N8O. xcQg#M-|i88񍡟M'3(:<Tazʄ0A#: (<Io'YXge/ԇ!6D%0EL*0uFZޫNAQ瀚d˭(hmX ^f !̥nZfjoTikܓ΀4#N_ݢ LJxPy5Zjϣ`%D8e$*k>J{!@q/;+<(khׂxx޶xHk"΃m]s5p |52.سR<¬To4Kө[G?Ual52F?N;+J)[q럹G4\ ]%f{U&"(A z#RWc˕?yx[Ϥa=y^*:' [%ݨxUuuL?!*cI{Fp(Uo}}& T+:QH4hϚ8hK@?~9 IeT}_ѧ< kdٌת+ 7scBAQ9y)qjQy|o0g:w1)}"+Jji8 S6 +{927NwM/Q AW`+Qctm΢5LlBs7.?g);<[3pYu3ranU&v2:nTA{J7K!fNaa6=t$!a-C֢FiHoqu KA'T+!apx(_f , o[cKN\Z-IKũ?nH"0%?aKt21FGQ(eWQJQtuэ@2Ul3lEǎJi$ZrgGgCsy:۷  3"$Zǫe$?f4wXT#{G S\8iAorz"VeKAIsݟ=k$1]EiA]8E)z(eJ*GFۨTN@# wPu†'o\bV+"ogFj2uUfȨT|JA2DyfeF5"\zF mx7@ٖ(FT}-lN})):np9N蝲/C>Yɛubmk8<;R}uفqh>#\-;7sS 14+psP}P7Njު<]Nivwd=I4Xj/NI.{'.k}ICܑ[9.58ڞ.sEKMS/},M8s f,?s|d CŠ1s6՟IK»:bQ3Uv4-V3>m Qf52.S)36{wh>aYySPUk&::0\WV_^7Qt[\wP4Fƥetڿsh=ԸϪpe jZ45 ꂷկlGB:+ W(Fm4QԨ)ʵ9:(48m{td\\H΅(A",aEd { 7Dw6Zז[NdCi8v2CZ>߻LڶOܯ%A2]/3Fd4~ Yex zEu(sF+V˯-(u'!6ήty -ˏvP^.gU{?qբ'x^.gRKɂOPb~%L>労]mPVl^ӉC}A^4DŽ $Dc$Pp:1\N_4W\Q4u9(WF遗?^F ?eq[_@>@pݗ $1PݙOpLlTW/cSwtyA Xe&!tX,f~עy rOog25s#Dˑkw3[oW-;6Eޒk&E3E9W}]-'/F֙]Nvrwf\CAQY? 
µe|Kq6x+_̞~\JOigv󷍟eahaiʇ6ľAf2!i6s;ulj4UDyl<>_Y<ߪp6y*%sx/WӾOJH8&DY@yF)jZ|\)!u'.4U(7Wy^7_@?/*-TνTޣ[$A%U} gWeJ./GSp=n8W},#Oz KSZ3/m:YW่B[[-mPŐWN<5ÂNyO[ :_ Ph?<=&=yxOc!gB,qBD*OF7VUEt z\]N4*:Dz ju8 JׁPNgr:kN~6^#N78ZOPڋRټi~oh%Hzj?x ʲD=$#@1,g,^?DJu>hlwtoŹ$("m/eNYeM'&|U;,•RyE.'] X}_Y: &!e!]6>Sc~:[~T8jF!}Wf*+T#&H,uJ̎&I|sp0J,G&/NOP\Sjdl^6)(a ygԍ$ArE=t&%QliE喐]<2[]&uȪDI!yJz(Q*i%UY]ah;J8 N[YPFD/hGX$Z=INЮQ_ȯD6?sEl(<>&_Jhܟ,UCR,C C q _ZPjZQQ-l{l&5PSt@q$]WG9'$8&$;3L٭{4CR52=Cg,T!As%5PyNC3%@$qQgTx57#ߑˢ?2o R`mmٴH?&!IpE6M('Io2 3m B%Y1!ϼ3Ƣŋ=y3y{+%We*-"T5G dڪQ'tgn5HbS#"X]#m24F0o!"ףZ "I /ϪhѕZ-rf`c6*Ia QA(|32EHD l0c2 \r+-zqN]|u69 *g,qBL>7/K3TDQ=x{ F]EAVoMr킏1xտU6gx9ʙݩhNݝ N{08Vl&eT 5p3zg|g~%Jl/@qo·+w gŅ!azB|` Z+hwwAݢ~ A+H8IYR<̳<{GHH*qFCQ ?J"<rbz?x`&dѣ屰(ᗷ#~Xϋ+ֲX] DpvbwϾ܈pxQh~noPOGÂHutoD!b` Oz8B rzВRPO1>uIgpUs[}3GWY_ŏ}d6ӻ4uOР.w6l% %S?ZXy7*{] Lھ{DXC=sXvGee2'mx#gn =ʋ~.[B=u) ;X94E1"ܝ 9Q.~ vXs>Fzu (84mOGqBeQc*e[z^DQ!`穣foo,Q(|x̦Ij=PQ7Z>{n3w= Erag0KC0"ki$e$/W/fkŴGIA9e- i!,c5K7-}c;V/3vߝDGi>4hQq<+ZmujHϧSVߵêklN?7/$Zܕg%h, Ѿ+S;o95ID߃D41xMcxJZz덧Ӯ ?CSCC̣“Mp̂51g2HTLvO$oEBؕǏG3< Yncƪ釬+hO>:?cAaXH?͌XՏB>F4_?_BOAB?(j3w^Lj3O|SN7{;ied]pvHs($jް/\~B(?\V Ҭ*W[]Nݷϧ"|3!oּVLuwg̗ `r[3Su.W6oM!3窾V0ϥl}houߪH흺3/YI$%=7pYc`O~k B ޷ E>B8?!KB\o; %s:9x+HJ+d_spD&ڴ84$}zb< :>ln% `/`<:_!TkU+ZcT]h污}f6*\~s=n;&{Xyoh2⋁yܛiQ&q0|4odb?)e?jXC4M }a2_\>ˉ=,f[_.%[I50^S]F',_3o0?ShK:tGrN}:XȇC)c0)lY)ߋۀ!YҽF6c-'P3( %9&l6f#'"q÷z˕wrGk`] qU ]*b%/>_1c}aO5޴ׅHU۬A'P,?0c~;t\Ofbpc{呏W0=^L~g۠A1>'{vYnT1V7ȹL┬U*p9Kw.H:GK~C.]ks=Y~/&Sֻx1yjdQlƺx!"=@ B>w`߅Sdaҳ6`-b|>_ Z`; u'f7uކ~ل/j( l[S{QSٖJHVSdVhQP/ĦP9R)qbgzل Zh=z|F;Gd Sb՞ym rư|vrR}lt+rY~94[ݽPj6 - e0,haXI0奶~n} 1ܾyē!LuKvr3H[7}4ߌ?w/Mܖ kDHHC+X,nJ$\f r]eJ)8a~Y]fv뚰޽[QA (4D.L _Ϋ%lW2>HL@bTo3>8ݿ;p$q޶+Iu'6r)i|ϱo9 Ǿp xZv=E"v!Tgv6Tx榓о#&h/CXە2Cz01IH(A ƦFJiy/>],G8и L/.ݵۙʟ%P Q!%YJ]3՜ !5aF2f17S)rZc|V إ7A%D,6I%e%EdSK2qXEL@@aBDDuFTFe@ z+rw3w|.rDM ۃn"@#:v3]j.@"@Sc)8NJ %z_k8-OrE\`_|h," +!M\ Ⱥ7Gkj96(c40P9eI~^ӨȠx.DjlxK CR*#eB 2v(Y`iJB4!L50#).GA)TεT9=?"՞|?& ,%3Gq w;󰙐D(]? \Tva_|sǺN*dU4(Wix{odClM꧳T;mާߵ`FcgL7" 2 =I )KL&xyYƤ mJIePU'ΖB6)Fza(S-z׶#F:=JB!W x}vTSm4Y@;!R@9 6J (R}Pu=(ʨL[5[[]css%*u.˃fauZ J1 vmV*Z1Yogl~1C-hMc]ijtNs@'[]=h})]5w΃ b:N^~Y>#˲ qy CNOIvϯf,Uo ue׷X ҇݇t7nڹkv۽.|46<QÉVQQMbe.D)Fj~Y(^_ah=? ") yiْ՚K5ѧQ1L0߽no{%yDc7㹦P'2toIqڱJ.zjkw8]^ǑC-^ql z1Uچ7Dp:P*!2^cu/&+LO84ŧ?L3h&󮸵 . GWT2W&=_kW| ˅e$TT`MRLL12FI*P]̪2a(0*N8. 
v--Ԛ"P3Myю1KMv׮V\+m8Rd K$4) bpLstWTIT0~DÕF(vÈLB1vRV.gf/;cb;Nč]/_aRu?դň crHקCF >n؄&I >8[hsb1t_,+3GH=O]!O?K) _d?6槊!@9@$\hT?q3򜧚pRda³*(4*LDM @q [$(H4SPCc5mg0kgJe _ AweCBD$i1y]'ql^QfH>a0 I !N 8Ls׸5ƙdL};H8muTǏtAۡ~i̝D 2MxS2 K 0jhT<$j5/UxK4.HNbLb`' cOMTPmX(Tg %Fd?[(U:u k15'_s8-ӘmIh o4-<Ƽm!1K:Ζi]#-(%KAcq` 1,`$4PňskMdlℊF\IbǼ,iV]"eq7ᥢLśZ7@*vr'F3:Ȓ54rBҼDFSȆN)C3x,cl jJ)鉺= ?i|TDt?[ ]=qq!kĭ a6_dP 0LFDrfA㾣يul<ƷⲺ {t 3uޥ 5rB4U f "9sB1{/h ʻ:ּW oגn(8)X}VG拣|QzE$- FZLPo$NNL|v[CpTPØ,.oJQ xS/~!D狿݄:CG`-Nq-ܚl`a,"*,e^rȍLՠ/!^].C$#=GS *I E+1@@˗:2u\5&lxKUX(s(Y 6J)\иwa\B6'3Cu `(.'n\C/!Z{[lЌJݳ05N>J46z$s154mJۻ jz[GLU-ޮ#= C3}S,TסKk5_Cka_*ЋI:yu&jZ\PcM v!yiKW8 PCwO2gCFe5LI U):/@ AU:72d8 %n \P,ZG>.bvt^qI+QQu=7SoL*8&ED'@)fԫF0GF:y<9(_w3@>KZąz\iIS-i #Cs =yCz ;ts/)0: %Tjo7ANavRI*"=f }V`/=1|G`&Wޝ0&N1  彐 %s=&sڐ&v=fxqrxk2;Q:\bQ{3rX$[+8 wxz#/pÕ\~Ո| n 6!vrwh%Uf-UB=(#IL8."$鏕6I ͅO'_Y,ÖF rS[V"lec|TCC-:_/";4+s Sщ{vCNf^H:tɤKKvoVfOyUg̊]FK8sǯ | x;۬~o.^L0mUt*wgmÔ+,b f3z4ד_\F w^{~n9&7AHJ+kV>7<ņ$K#k)G1xNYQ g9>YFN:GD5onpoxd]?#L4/@`|:ͼcc-U]ަ'K_bqoLw H.;Kc>6bjiMCߍlz8O3``aqLt٬48OȽ⥓% gZa:usm?"J7yIJlDPVڃ1eO a j59>B!GGg uU ȝSV K޻<]zq3 =AO5Ԟ"8`w$C B=BE[k q]m2uciƴȭQ#2mx#`~iJPvlj tD9 I6_tt{Q=Jܤ[jl3lhqf]ɷ>d:O}7UpV:ѕ7/HL`w8y5pDѕ.w{P1e&]_v|[V@ ׊鮜}[-!vk6}ýP(ٱmFuܷ"_Ge7o: al66w0lKqu.ȥȝOa,^'Wv£a 9SikK*Xۨ7kJ/KJ6TG5o~v+/}}я/qZ?YMBj~|#$aX1NL11bR2<-W};@Gη1s[6Qֶ M1&] tJFP;XY&/e"5զm\]RJ%MN CK:8ŕ,tvO˙b/`r~5{1{ÿ+#{\)KNKXx{3>$Z7=.m ˴o(^-1nXA`T% }wиύiEpVqm4Fv{[D ˞'}*q;r$/p=7Ĭ\k=2v:<{Dϐ "}6S!1mOE_߁cL|3oE}blݯ$h,4YbL;^}ڠZ1p`vU'%Uf0Կq I;k_TCCZ fzMA:kgi xoReo0̘hrE9oRoR}N)#^IakRiN:u-16у{/:0c4LN?7q*qy"o55S%xGTQ`Ɗ7M&DUNIݪPnT;r }=Vm k;LgQ5dŠM ۶Uۄ" }_8VrmAw7-i(aCq7{B(|مsE`}SMa%8cb YEl3wGm8|< _G \"[Ly5rSΫGa%0oeI׹}jK̉5KJRŒ$D-iQx*C}9їd `1NYp緳Y쁌jIY`OvW~\\˦ A9tb-8fer)uh2vvnK^nZ5 w<iB;G07.Ů:./½80a;gFp~bT71 ƬwU`!i]0] yw3=3 YT 'l!_TFnpcdae 뗋x}6Tَx3x1L͍>&0l+uG\tdSGNTKxL,E-ĪD& QI*'LM|Aù8LacyW4%t88ӫɀc%03jc:9oDoӓ04&-:>|Nq ^L'uǧK[RիYچ0UG2(s$9ڈ#k(yUs輈ZƔd3GY0sa[)HL(^M 5ZMoۺ Öu5²֫w?wHNc_2 ado'.XT?őP\}ßӋyGZI g|+G7^9??8<"`E B[{u"?ȇ3ݡq{;1CHpTjƍ'BwX"1^9bURATAAJ}@A N1[˳6vK|Ahu5@"p#:Λ7CH ki-;#aSjh>A CL{ !D{#5Ewu!*AvaؚLqsMHT 6;Qvb' R̡fUMS`3Ecl Vrg[xfl-bf6HXpW zo]&Q^e\fI?~ސm4\;ETHvJ&wmtN(VNAWWSIE"QUEl]"bqĵ R&[ ֱ.ud `d3fP sImDUrU/XelslW1-!W1*f.EV oȸe2 wAױIFz5q q'cV9B9`: 8Ei;7B$!%cջu @O!Ǐ, `?ù } I%neLS?VE{4UU8fߣzcW:M{TW!KI _{娞HKS"*c$ܓ~f*䜇=J /YqCzj p\к%!9p$ ^a s m@Qݜ7"-8zjrN'WE+Q>TvJUMH.MmUn*ʕ.PmU{ʩai" =UUL긺RK KHN̯ǿM@_麤 (67hq+Snq@9Cۭ#%I䋢IO6R4lG'F 99dcU*lArgtWSxNC7zgsrȟZu 9uĀ6'V99dNj({Ǜ{[v9D!*K7'?i-8׫1WM-9-AsJ/AѰﮎ\kՇruQ>~JoߨX(-Tg`*ܲr_vtIq іRRJetP^B&ֱ%THX,V 'X}&9;Ⱦ4^|% ]l|o qf<9+g_zG/K«7--|ǁtbYN^l\:rO[??SVxʼ&"h7WC02냟otf?Q?љVL .vv~q9;<~>PJ{uqf oXEd$3ͥf)"M]HRZe]p=뇊W+XJ'eDgZ/')i\޻"S΁8h3jKS7΁Q-6X{g'Ls"*K.2Bz. ^΁>s\îZDݼǣ+Ettl0h ܐ^6@*(d3 j}me(uyI!=@u-#HM@ #W]B[+nwtFuk,k+1q:!zJzO#fTY}~sY *]֦!aɶFE% )q>x|QSd udQ[)I^ch[z#[%jXՔm +3ݔTfqj9)}6BsϺ3sQ5mκ\Q&5L* w*Ng;vR1ލAtybީfdXcn؜vY[W9)9CVJͯ5-=ْybA#90,ƽF'u.uEb!I#Nm+c,=s/O9J5G7gs,=%*Kz *;8"bg'"VS`vO`gA8W0sRv9GN5 'h;7xyB]jrcqч}ύ6k]TK_v<죖Z_Gug/.{1* rӷ[}ke-m#7|W3tR Bf;߭0 ځNc=_<5V |gӇ tSwFŸXYőXU}Q>J+_^J~ޞśSacZGh**}^;VO+%2G÷jzkcti-/KH*1ZtRͻK|~l{86[5P.B8j1k5J@ŋ] F;>z*>Įjb*`}pK¨Ap7+Y =l#tl ]}k񍲋S%S],z\ZKiVO{bfNP0"ws|ZOaY-+:Fhu"uT!JŸ#C`@D̊5AsE,U GB%qVs~K1{_FNӔ*?&+Ηo<粆^R;XܭkvA5Á)ͷVWʈ )'nvےus3\:f!2^ێfS 21f Az`jDbp9&20k6,~k֒־k9fl4j3buk$ "zNku RuNkJrs}`OdIg7VsZ9mɾy8g~NrζO\z.)ͤZK )| NbO:Q+0%51 qຫO#8'Dlhع͌;LcG g$! 
c +Y4X] m #j|3=fN4鑇xUz/ƽ=CIJ'vɳ{c'\ ˱a;9.\3M^VhIOb]H_U`~_}j HO<rؑs G-1,zu#zØ@ bIOz72RSv&0MLf|v$|Ұ7%(Xck^pHXoq̟΀KvtOQssoA0{͒7"gw@G/ r~1~$ړmspg|\*?^k[ۙ53=d,۪=d;X<=0|֨jޕn6j2rl35N5AACx f(6o(vf׆ &Lp%9,| CBR.kQ{/; ,# 56ZUɂDx",i Ugs9 h^~6SIzF\hqP40j(!<44I.>O4l֖ᴡ_kJ'<[n9́~f3 MFlɃ` >}i~}u;z3̙f6s&ٜ |P)k֫<&;*пdqOvɳw`a% ^ۓ,tMa/iE1P*h^^|*XuK]l 3+T7\P*U5J\ vVaH }eV'ƹ@&FKTWe}L^:7C  T=L}#Bp0vH ."D]aMwL*SN%qsPw}SY 'J'5 ' <8 ~>;zw1"RݸWBCeY@sYlIyF;x XU3VM1cn@wDP(D,X#OJ >-.y6~5:EpM=gU$ Cs@T$9ad=kެ~b!`uU|?.Z4W0oǯ/:ޔ\N\}V]<=x.gy1 Ҽ}ꊉ|04ƚynٞA𨁷` pMn :W8Bt13P iWa[KqnMZ-{ } W1}R J2bci|\>m ;c1OkŌj.-wvZKQtUb[oS1sKSwŦQ|d\*oVg;Q:{BMdD`'CFhqSs*RN|T!]fD(fƻOˌ;^5qZpͮhGsA`BJTI{u 148%d@:<ui,7 rqkm=$|jHA ݘ@aP0T\=ۇY<~g A^#=;u>l),RgZs4lBXBuHTxPèιCfX)XVF ;A&Rѯz:$'07?p77߶4H4ƴճgyA͘yp(vgekA(]M ʥϜ֡$XP8{h{>H#Z>k05zۋՔy\[fx+Q`2_[XY,poԁb,ç TQ~Xߔn(D]'okP#Ӏu#ԥlf;$5#|؍\Sr2*Ar  )޲g9Wsk%$Zk])T(8{߅/Y lSK)S\YF` &3ml~qoe4/^}V\ŸX_d,{Z\-q٩75p%Av/[D\dG_4Re :"Lbkp Yk>ͮ#diO.GQBXA^fSVxnyBӭٛ[YmR6 㶋rIrBGFw`^}5OS+d\CPe F쭯I 'i+:O]|N@PB KMÄms:DZswMM2~X+q7:K-3́5kKs` z 81}A}Ԭcjv `YlfW<-Gg PDƏ bOZ q60s8Q Z(TӈFSu9% -a698DɍmshLպn[W7Z8r#2w]Lg+BH証F^t{,zi_bY,NK<\4)%_Ǎ>=߶ XhT3IGYri5"f策"pl.쎖֦J=Ft}W@-H'IV/.ۿnz/ة9%= 97gV CwR,y6D)Ⱦ9d<ƣɭ c $ؘͤ9|^E0-Y(݈̈/A9 `BNrQ a}$޳4Ѵ5Yc JXfBLb LtLKӕi'&cw:CiWh#|LO0}]qI6J9}O7?yR_wLB]atsu>>zOGE<\ٝ~cZ*$*>̌^Zɦ&PS+S =@=uYnNU5W9ƎdV=SJuM5Hu̩9SM91oW#Ě܄$Hf nqy5|@b $;uD;r az[= rwPSԆraƕc)\>i[Βј,ζò>v-6RPwJTނ+\-=,%#w#W.8̪rjX|vxB+œF:nӆ ڕO~l#?V;}!4%[] ۖ4/LD&'Uqx[nα#Kgj<<,BM,JZý?}y3^\Ɩg+pp.ļ{ۇ7q7gG6Op-/WKH4_?lt޿M/u pdcʪtH2sx[%w&/]e[5\n/DQ\o Nħ;}}Hp}Yd#4˷KYɹ%<i:, 5a`H>f90][o9+y]0.dfbpkqq2OіmYj)-vlbf7Ūui~xXԤ(Yӳcz@9V3fkrсvR* K_ 2l=l}p"o^q*9+0Ў_qanh7Cmj?=eWt6y=`x.G?lC'T1W[ Fk#gh66[蕃l=qHο5%Gg@/<^kgzYC6wyun7埖F a/GGlXJOX#?ån|sƒT (fՒɥTIS6[PŖ %kv q:%*'A Na 1;ܑ+{.XA< d|ðFK&ʠBʇlQJLflE}C KP K%as>'5V$qVU9m9x\Z GHww>wM?|˲mOAĄ݇1f/b'H(J36Jch ¾j{cQd9@PM0ZIb @cJAG#'Ag( H[D4/ak;W3`28Q(TqKxӎr0-j&_hQAc8mW@]H!0T|9XӅ(Cޥaom%rL%Q=Tl&QjŒAM{hmMVrjcѢh)DwoNXcyB+׏C/#WNli馓nnrД!{joF;G<6gnGhpEL~ky gRA%3'˰=a=7th1 [<'r{n%JyfQ/WzĐM]|8]]FDL:7Z 6Te[87o(ۂƂFW 6l1pMlj虜KxQidc˨;t-MuAN'fV'WW~&>"RMm;o.] V3- vSOcFHYki&'ή Fk֬?3fBF?|ܨ@䄢M >4.KV#֧6֭k*E;b]H>jR S®Ybm˘~ jbӨh|`;lRZb*Q,C\ lt!7H}>KIxS1.&A5{?ZIDaG_on]=M?,źm:`DpW+j )Jd02tƁ-lb"LQ%l{f*4+5BTGO+QD(B>/.S4դ,ArD`7# = sdQd@VX\D.@E%QzA6= h5U=3d9XWmDm0*y=On%4{"G "IV,_D8=N;DOqRe_fr-FkE9]ۿP4U P7QN i{ 0(Ι+EB(2h6zfmg-)[T^X&~}𣺺:Z;)VNP @J^(ۢ%[1a{ݐg/!V({qȜ/qO&QԆ5ɬɿɌ;fȳa2\+n!D˿`%!$+z&0?'.ą5͒~xX"pҤ\bo¸`(L (v~KPXTeCQlmmF9Dl6ה>XC&ڜ͉|͂O qx MS o9,=j%eGeѤjqnΥګe*Y S.1XCb!Fg=l|ԚbȽ EV)IuFЭzš:+|9B A;q%vv%ΦM Z~sB:1Wk$c5dUQ 3T*ҩuhTZۧeG` O`19>$-+X/i4W-ƉƣZΏf*uDaGݐgc*;+, NVkT'{=ȬBiށ:r/ʘ~JoP~\y!pL:.qJl4h.@`8'U *۴6f p`gg#*VnѣP/5;;gg|C|tQ*rʻ`y V4+LeAI;b0<}7,wL 9g0%!b0~5b!φ"h8uGȁ9-ira[Ku LW_ڇC4}1`_PY~eڳ0v1fvLjq#+]a9 `EzvpK)3A21sj%nѹ]|L@#C t%g} Y=n\CO>_4Ephi;* ygۡǎ !攐NyeL^L}O=!4ܛN'Gw,M8b,v;_YN'|m'R qWc{|5P6By}jlopߥ% 4^%wzq?WUֆZ hRtoاTKq-ⲍ!bsl .[ s-SUfLŧ݃~9 _>j)_˜̷Xj۝ _/G,Irts3z_+zW BN}⤞ OB-C1y%k=5Ft:ח%_ 7L6o8 GrCTj*_pUCGq#if媍.sG={_P0!dnU$$i<ü'7Z; ~ۘ`f!φɼ32/r3&#aS#X?{15Nbq 5H^ً9 #Ӝ}9IY1vm) 0/g}QkSZkV]QwC ^|r T / ͨTZ4eU5?-6^t֐-2䙰W,P2 v܏xsBghM؋=lk; = 5}8nKz|s?ڂE}|[ѨjA%LA56eW?z-Z ]9w+0 iw?}ʆ]T׷ŏQṄe7.,M+眰s4eu*`k;/A{ 6ؘܱ>;#>/l]I] 䔄U+w *{o#`!]Z|z 2ˋgme<nȁEHnjky\KnUn>Dx(Tv3^+łȳ9[}銗߻z~cD(ZfwThtv=߁wmF܎w4\[kg/U+MnK̘$eIƐКi"ٵxhH'32>ն^6^5"}Ya{VOP_Jٹd" ۫>TJw"&;&՜FT$M !9K*g"{&O߽7 !aT~7nnԹA1*#(ZmpE, ~xYV.JgZǝ`4H)PQ2U# Z{A0kB}&T705ׄꂹ!n36oϵ{i.5x試!թQjR9<,-[mKDojbپ=l. 
|9k7a<H+Kp#—7a3i%nD3̹J56eܼ{5'ݎ)iXgv('5xA Gu ep@J̮OhCkpO\_rj S%נR\̮&& :04Pv<.wLU̯L2[L)-9 !(^XN]~ZZ}kmPf{e32)֜Ͱ vFsEytN##pSHGQ6֫>0$WxRXI6*5)$'D)jyͣc3ˆ; ڴ߃AAA !BO -}$TN$rժ"ZIWW}YcWa(W+Td j!lHxL|~C&djwԎG)8M`z&fj2jP-9@T4SMjNԮQ jo3ZwE~p0rhF5Ez;ܹ"ӵn, v| zSEuy_?;RR8nZ+iG=uyu ğ>ѓwU.A\~\)]鰩2U3p/= WhU.Ϫh%|.pc~# Rݖ#Hȇ(-8M'/t<*O=7=#-NR7z P76.'h^M2JP^$5Jŵ iSPX Y/cV$.N`MMs:^#4*NB%"ȴ n_/c&blJHD@.ã`8ome;@#8yʢLjJzav+8:B, 6PCqpYʹ n cPJggJ(C=@D5;LGQ(~L0^8F|ƙa>\k1+aH?T,pf9QF#g܅&Q_hJ>~aag0Oic)4ndCI1Cݑ i#2}^J ((~Ixr9Ɵ|?ds[1r11W/hNN#~UJYQZ4+L[f"noq_댪ݦFc'H'""Z jM߈4{Bqw4CT{AC(aݒ)fN#Z6)jNa]>ks.ՀnqTEK!ݨw7^v׫[KHB$̜+U_^~8d?iV#ԥ#; DUhj]ө?|Uǣ"Ƴޑ+ ͥeW,j~|7'we 77;5PtיbjЇ8/r+k_m=sų}ZKc|uwmv}9˘-bMjtZtVtlcxCU,HHCrz O\B \n?GQ1ex>C1j^ Hw"h8ZVpk (luNʺŻ7JNbQ5]mѲ mX֋谗 lGK&>.լzZ-5m|+KigOvKA-VG=#JmN(Xk#`ݯIȫ+Sⷣ]2oY :ab]sTu0e(b1t?k'wQykY⹌TҬԈG("sֲN)oUQ?cs9Wpz4&V '`U}qL0Ή| '0hVdWyv77Jk j=n)ЮpRB >L '-L:Ei6-ltE>fiF| 7ji[Yu@U+[@1y1E%Ǟ}_Qey?'=b֒=`>`^&H wW%vuIo ^Nscr$!4\Z]ݬnskcc&'@D ^TT)){V;]JCK]JRB1ݣGXp%U(06R;p#ਂX мN$$;L+J0Kq_ O wϦVW?Ǭ?q8~^uJruM*)9:EӦ'ѡJIjެ7[b!Ttf'GBǦgw9n#T |~tDs/ezJWq$cf?tQUu{~ݔIɇܧp;bNU HD')u` HK@ED[JSD&Y+u6SR'{" 6TRB X &(8zM-R]p0 FU* jQΔBM$ӈ*)K%2!JsTʽ`؎`\k/ئTr>0k/MiX`77+ R!e5+,XX pD%p) u|Dcz8!8cc:Qp=@>8ݾ#{~N:S<̊o{'/&}6yimIq~ T9 AQEfUF,)jL2EO1CB?!9KޝLBl .t2lߔ0N.looe7/zII;Tx^}`XbZtym_Wΐw'GC'B9ⷴrSVI1);I ~oڿdžw1 1vS@ ZvХ)ҩAerAHa滟/=__e RMND ̣)5ep݁[9~GEPR۵7xJ#Rqe#,IpZ gb'J婠`J+#5OǗW᳭sY=sz.Fn(ʼ<մS*BvT(z9\VlWV*+ePRKmˊw$zPǵQ8#ro4n5YgI%V( u@UWҐFɌ :RXq<0P1:JoC46<;i79a#dvPo{Q }E֎qJRR8<6t> sF NKCFlxa E,”:(wNa"i:\~j%5m|Y3[ E[$b+}m=pJKlNq2ڀY*08NQ j}d_'$YWaA sg6={a(Abp9NQoKnZV">8kV` !h"H*`TFn1dr6[Ԕ+EI`UJhAh}+s{?"\p$hl4їB `V#J@Im+F};۲y!H2/AGfO1^=W({Jd'-F'#4j)|Dys]憒\NbmYK9w+ x8 k.ש~ Sc!˪~ S*JI]sV'% G` q\؏3c]Fh"0~Xs&A"U ߨ*m' p[ݖz~Rô䶼 v AA{sA L0 #đ4rX#FāKLf+aH1}8!p5+AS0 i,uAJbMƧZ#:QDɞOg:`nHކ(%&Pl菶i݇< r7)eY|8?W;6^ 8jkUGʨ5IJHo qEwo))$7= J$X*K#Jjh7V+ HrrgbT`?l|C8ǰ– bGI‶ec䑎ç<+2q[UL*y`4JC\d֟YSeWL ޱBQw3l>ͬ}rPK/~Z[Liy?w gm|J%$JMDLJ%VH Y Ma[N+|۴6T¼`xs~[3oy%B"rCITH0rvj#RH~@(} ;3O#Sqٿ^RHPlM&HbhkITA(Tf4;:Tn||g[/q]jl670OKܭM rQbr.o+2pfa >~'2jtb"(EҡT 俸2Mk m6i"Gw CTQ qP(SRXAh,h1>zyC$~./^/G(r;A, s0}\2̣ٞ V8W$E8c`B'ټ Ƥ s)"dO%(~NsRk *A7!X!a\8U:8e8T%N(l(%D'$)F-EI´K1>ѥqq9RōP .T amlj2DʄpkS0\cQHy\;¸@5{IV:RtEoUG%9FoE8ltjQd1BX3E&c:9F&W!T@Q 8]/A;T? 
:{-2eݎDd*MBSHa7$tUV#E2cK$udhڞ\5|B6_ܧƓȧoh GK57 yXqPϫK$| R 3?qB0/U"sg=x' *)rl w7ogONyw3AuZor»b@Ѧ ^J0ͮ!F`b:2𩗠[*v^eym0Mr=1(*?Ř1ISp(U ic)"11'O/4o^UdZK#l)nKP)J$U(թ>_Zb0TT!6$0TG*G?Q2] KIs9z\g7$N߿BWDqe&cAd`[G2db6Ř% õ6DOB&"LńbGbp}11O%w2lǠmN̍p1~FqDt E+[=;{U!~ps;nDשϾJT+I#R3 7"I|eMZbJoZߦʱUЇ(BBjC i`M&|)'ӤU&m}u`27Ë/SiDq\op@M;g> A^95 ׅtN/=[ɿ<ۏhlq߃-`-6F7EP`և'C5;#IP->˭>it@hTBZE>~拥Es@9O< 1 &f4پ՜hwqka\s(Bi PBd$XD8R2\" lo]RZzG.pE0`"`ά[:Rq}b}({V<ɪ(]ǩ"O)ZaJQ/clT+nOVDk~q ]ؾZ hmDlq p+ĎT<!zXDe5H2jq.F 7ق?K{0 ꕺb]Ǫ x=KMUA7W-.9U%89;n{ #7-wNoƮJqT}T7qٷؖ o"`]]+hNՃjUc 7;J4qXKe?Z0M_۫`{;veW#LP|118 :RKx|`13ga<4UNr$qBSmo( ,Y]SL%V:N ,\B1:58!1cO#Z*#̝BՀUcd>F82$O4?wEPBDhk D;ЉXD;̱1k(eBk¥Kq1؏i 77_&#z;I2򳰧"_.l+YMH:BȾWB_%Ynz.J([ Y;)N)QgŔ3-2E&qL l|B A!H Y5'%؟*Smy1~FAfڵKɍʎ>6b '0me_NI>ر?u_ZCJ)IU[LΛBD)@DRN%`$Ju"yjEהX@PQD$17CD_4rgdtZ,}]/GY+_@/DB~ȉ=Xw 3?)aTõ֞qRr9ğ #G=to3;<|dwfOk;=MUOũNqa| kk.wNY Lx}Jg 8mvီՏ|av;J _߾CrM e,X=a~^i2 f@ &<xucf0Ru܌'nVWV-T'.`okw=jFVI2m<)_~%]/ACWR|Y`b~c}@M;|@(6ىJm)=} phDwV磳qhvSstXܟ svv5;+gIoʮ7 >j0eg>};`sX?N_/..A~d:=}9fuי,`:}E7Ggi>Ζʾ@{wƺ9ؾwoiӬo\8}dOGa|Nw0y 1@E`8;qgZdtԨxԂ4![L@U]l̦^z=7uٗ`t捷5Y<<2|7ݫXәd7_Vn|dQڿ?0?z>3PvԹQ|lNםo8|lH 7f[yLXgu'i~y`zMh O.$|Q x rvw ̮g7LLO{qn^oMf*.q۬ʂ*%sZ.ljѩDc@:6Hi$g&T"OuG#͐g N33 ;c̋a>VYLבn4^|r 2l9mvC0)?$|θO'\ 32|:){Pw|HwYe TejF78_?{WǍ/=\R$a/^6|%4=bHӚ=0lR|X|X(F x~A_EL+8+TPp˜Vײ;wP?5-Ie:\.5_ݙ =V2Í|ɝs 'pI4'dL3g2y&ggVz{_RR9%}_._x[XT}wp!Q(3fN)m#SH.@ctb<"i=~4X"M~]CH":%-+AW1TsBr\I. [g%KeM6F|Aa.VUBX A|Ev]SqCn%U?]MyWL1ɻ ~]k-}($)0 A2*2V2B|UMXLUKnS,їZȯ8R*i%wᒟOdHOgﯶpIeP+Q Vt"^.%1lԗ%Yq} vׅ). T%wW/'.9qɉKK{C*sQՎѺ 6y_De5N\0.(OZ 5U0UȘ%wJ%'.9q>2n)BSszқ"U}ZX{aTҢj蛨C ieK|_4QKyD5Rf ;^ 12 n,bZ=Aur8%6tRRbU3*4:Nˢi &90˅҆ȅ.` Ju ;mRPڽ$YSC Q9,8RިD$3o}˥Moroo~n`vٳ Vry=DS$u8#1{5W5^/zyZrqU])7?8?qקspsJ'A(=.&PaCEq;^5 dh@*J\iMACR5 6Lw_?7w/V:Wʮ..hv354j.C:U]*\N>>2|^smR(7#)mH~㗋jYooŌ!:N $;irG2zؼn} #m@u%^*oAW/Z!ޛWC*@ٺr"eT IK{vwV~gH*꧕N*%٘ g 8rO'QbZmU/H#ǜ/^+zOTvmyxSPhXˇ`Q*{D*w&hGeΘX:DpZy7jqTEL6`Fuel[|U`Zz 9StpzAzF{ο-Nߝ3$E>V$F4&(ieJ#`@yE¤ h^@L:y 腭 F8 HMukQ$~j9]];9`qތ^4lgaߑ3kJ [.lmFL$-99$cee,_hdZ_Ox%v;Z۬&vi^-!7^,rr!m4Lo CC qCj$ʍÊ 2ILĢmT :r#n`m ]ޒT$[HJ<3I''wU]GQ9-Dm^ˑu λ/ve]^Cg7hrܼpW`4L=>Ny+8u,_ 1r@ u_W[=?՘4ZQit[e4E/Ox}kOekU{@V{+f "IZ۠=y\Jg+ zg%{єJ`Tʹj;k2ERA{':<+v~V(m y8 QڦcNN[=#7LqnX/n$+LYZ.l0}Q+4Yd3- eod1؞cYG?:no\:92緶^=y䶂I3m tsO.n7󷯾s?o^8cA@^lWoN©;;˓O4nD Uns[:T᏷C@V༃wqXۼ+94ۜnH!ѬΌTҳ XՒJQrluy9؟|Q$)D&lZ)f Ľܟ%s_ݪ]28]F4qߜ/I'`9L`ca)RΘ4 >5ȷPo9uX`-Õ ?yƊ1(u&譬ŊDE~3RxY'cjZ6:hY3\0IGMf7* hRI޴%zkuŮ">kYI+!꘼JFqDCȕH DQ C V P:8FuZkJAɛ:k@23 $"ĕLƑѶqR!0kJr hm? B9giZ'pYElKLZ:N rV!:#'4!lk'G@ݪYilVZ|4DK~/{|vG?ʽ|׫gd[=fh5çU}0~T~@U8bȭ{UWxY=RM-=r7o!*_zWiK{#[!QeTfu'5] ,=}8)^k[WsO6[[nONrV{lӹ5  {4 ṗPpUOx$RӤ#Ԩ1T*Hd&$C#RPh#]xieoVwcǵFmzf{/8i`+o0GR-VA!)Vr|_F.(~Cmx5b8W ߧZRdd>y/2!GfXZ;eem֦8rʻ0O]öCkr4j1,Qӣ-ۡ-bK[OVr-AJ_T,V_'1!SfHG|:hƍg.[v^+V#Lx#smI١{8(y#iWCtWX9̦uSȎ ŠDu~Fv8^J2խ}[L  uScnC1(QǺҋPUsrmDK:%^1Xn?}~`M`Mi[S =ֺPTcq&)Y5GdBI)jc@(8x:٠YtN Š~ h7~ȌFG04ZFEk݆F>e ]v;ٗ1ȓSDeDYM;Ik~U4h̻ϋ0CJFQ<(jC1YCz FCF`d) CQʑZ$&Ů R>? 
ςēPlH%HWum ]4Vyha,iʖn*KVfƒj0 '7)Ł(x`M;\pG[-$th3yvS,v<.٬um: ߜEcW-'Q̙x<-N9OP`=5륷h5e䲢SNG [\+=rAzz&P"5Vt] \UQ;mp% 9A):BS"06R z' 9,vYg8!Pոc4i\n 1(u[^z@!BD\f0]5z/$Qm*G}~!u]F;_ӝxx- èLͿnݱxn,7㓰A]*a^ h_K>(U p: OFJTt\=}YuT9k Kַ}߻?>z8#Jr3Cfc]tda 'RݬX2G D kB&Xh>j)8B=o⺪}KZZ,Y Ք (B8glFQFFs4`o2as*崹Nj֥:C/wG-(UCALqwԩןM А\ESt F3x?oʞG떊Aꤾu/"D"$:n}"[hN2sϺm%bQ+vq!)nFn-hW;:Y%=I89Q&euV&eˤL֋)Ğ5ʤp+35+e8ZcPe\)AWrx0#1,eE:"Ha1X+\N,&7  TL1y9ͥjk0_45!T`waRFTCci5y%ҍb9s-ŠČgs TUV!9( Ť |3jDU*1' sHןUN)֝ҥGtmhWI:=uޣuK FuRQǺ}Iqw)ɂZ64+W$tudŮS1QwԱn=zS]dAc[NE6tv9ظ<l)D,|\DŽyAE|)ݬŻ,0"D+㗅/9p~o^?hKyew-џf|8YܝU]]߮y`N~yqZ]O10/.t^?Q؅/f0.+&*gW7Aa ËQ^!{乕lcr5fȩqF1(Spr'o_Wk̼肚$.jr48gdvc+%o_yk$(ۏ Sq4\s8vX;a)1 $ͼy-^xP5a,J!Y 6`PJ ſFoa3 V[ \|5\$pFN8x?Y9n v\ aZ,D-J:A`utD zcAB"9e4xQ3눦Ή ,gJ[Ę,WnT *IJ*:|88V[$9v3z c>{|/FJm73wyca=e?h5T51g +ý:,ܡpJMpagiQE*JRhNl@3#p#U[\e!% O+Ih TKoooc$Y9bsaȡ\QS`f*&͇d텷]_eU酲B8s6eD` Z$cç݈"JD_478随֤9@"䑒HoIrR{.'q$5f(S5ylQ "7tV~%qBn.¶ÛE,o7\A›#B׀.xsZR+_Wfԛ`r  Ī,!o;w/f?l4g?,@n=|oO 4}u|ݴj)/(Y$`"2LЭMiy)4k./VMf?L` : ]2.egb"A%w CsE8;i1߅Xa 竛K3?[|xQU>{of~9U>pSunVC:= ?aZusfJNx kw]WYMz^TJ;* g`]ftS kWiwʷ9Ogac}`RFv;Z2)F0.dp,_U?MM2o5.a?40ms,4Tsykykykykl[c"ҝ E+qHE*@ap.igem4xV˥dQv}#II&dvbF|GVA׺JTa#@*S^^6 B-5$ |FEE}.(=H|\_&_9uТCfz+I]>yk 6JCUPoZ'= ^[5Cug>&O;WRCRƮT2q4\>6,$=dƥdlA(w_?xn~v9=IN8Pf YamHz̴Ih;b?a WxE+ ?Nt\/Fcў̓"xfmNͰՍ͙͘X۩k0 ;OaZS{ηܨ.}.K.}Pm^(F͇LGY#d,,CCsd\% Cb~D,MY9GgU/(v?$ȗ*[5J;8̗-Y;sC-3j9WhNZ֫EsTMZ{ͩfBY/v^MhN\K*MbDAMje%NiҜT1P,@JTs C]x{HKl(Ml9-׳첃-}ck,f\FH 4x&e`J־~v2pySfmu]yy2OIb'`HLn>OI0|HyԞl/R؉r3!߸]58nRYTh\1:cԱnŏ S:ohUֆ|*S >'9nb"vsŠ긎QǺgZ}[7gFZ64WQjvvqdݴl*:FzզӷusfukCCqmS!jih\1:cԱn[Lu̷usfukCCquҩi{!GĮ#lǨTv\Ђ|*zSN1&X[IϐԑvI*][$omҲb<HU:aT@^WE'e2O2Yc=_0{ʼT$7*7WA9jT PIFdh)?4 '0 ^yUXG\& & !N0t`AdNBys05E伇d'Ĩ%"x$,cq%%K)K:&GXVlM!ȭṶṶU [).){77;%R1nNdkq iLNR׫$. & r&fnLG@Q%&dH aJ&z0(u| b0aS4 ̙RqF"fNRQ 2Fw% 0oL4C1 ;r:a1 JSTޡFJ-C P9: X~1/-f q$A,:%*uARVFdbj^#RIr#x7]Z G^ JV4㳊J͠Ɓ% *rs 9}7 Ti$r0nYH!ά25s+ιĹچ bKB՞EvJzqxJCiĒ4CHrL$E1qo$N8$a8CRTK2G0u* :@"" HbБ@Se8XH46TJ˦2;Ø%:d0%8(E*Q$y$S(0T!NB.1!S2exl[b>ēju;<>*S%8|K˜*_T{uNM>(( /*}"<\iYq <TFHL2$ ĔqzAK.rnlUsSbA@.eP3a,-MԶX7Dh5ƝwqrseAwjWKsd&Xh=KIrQK5Ԁ QK@,ODH½Ft(uz3J83Ơ! 1?N:,t2(w#P8Zsnp!#/mFg;^֦hMJzKDh]&ZK q]/zY\[jg^OZŠ 1/7lXOrێTRգ低m=L1U=bb/RM*\ǨSr4"c'*kCCqu)i, ֹmD8ݗvĘC_ˬd~%U.Dk 1_0cpΕcM A%ͅwIsVħ\ɸ$-*mKT*9pn\A[ÚӇu/lbՓ.6+Ø%-݋V>[G{\Ғh:r`ހ꒠%͉\>@YݜhHUald*/0ōћZqnP1B[jH3 v^#Z}>l}׼bdjQǗn¡Xiܾ"O8a>+^ѢԏArWiɴ{NL/.%5ޠGnRUy@nRu*v`r+)ʗoJ,l#5 @pw)\ÇqW=Ùύv~fFHb #.!K/ߣ1P?TXG,"*caBQ$,#PL #&C8 )%*ObuY'tAx38 fܝvZX%Gnn# Ɔ$\hŪ9iN +J1G"ƒ(Γ \&HRLipeq_8, E|t-9ů vFysM[aG J^E ZLLnr@푄r&/W~~-l"*UQjWWH6(i$ N\1q"eD&QH48*:\P|BW^L=̄,P$ J3Ӓ0 a @ȥƄ0BBMHndHP DA +.L48 e0w$Ji03JBhE ұ]?ےt=%~kG EtqK! /c( f!HRS 3f:R;"d,Tv~͸Å#n5h0ȸV^fKt=5ףFKJ_3%LWzXq ȗ/\ޗP˩O=xeɪb0ςh|yp\s˲whcd̾@+jj#P9pwJJtU!u/bacعmY]&M{!fmGQhMr#$lźV#.f97S2 g,3 Q4Y,,IQ~<2&6SvRT# "`\@P@:J%!N"Np2{z0k1xfeZ" %[4O;(ìM.5PGs?!ks_EYNw{rx 90W1cQt98Òݣ|̴&Tyab+?( [ﮔTjnLƘ#j)mM޻V0yԚ&Ծ/.%*g$)]cBIRyheЁO<KvE (LdYxnBүl N4OSk2CpdF,/^o[cx Iԟ4g,w`F.^R98Oː Kty| ]`tQ{'Hy3Kǟ_́%9 d:oj{>K-㞋,IZМIf̮˔TcL٩jy1M&Y3*aJ+=nI3?^ښvܜN,){!mN0lijR@o1Wfp]-ڟTQ)<4#=nBUTetQb447W@"WYB`)?cJwmДkϧ{}=_3n);2=Tuҋ8).~?kHMQUU}F1?,v,| , AVKMW dWd"2c ~NWI?e %łK8#zJgY0*dħ:TrqOUl ~> K{5Vc.l=Cs}5ByP_~u`I =lz׎_}ي#sOvwP5N=FB#59\横O r Kdܜ)().p0-(ˌā|mrr.GXqg3IXA4svYeSkŹ`!ym6;/^\^4+E Irk_DX,5lЁ^L)VRr(.KHF‹)m_|eXK&4w[];7QzM0 EP@WJz>Qn\ʼj,*g0MS=bѕbRJn=%'Dt _У:tk leo<)Ƃ',8t[0_t, VLkOXtAXT ɰ{]Xr}JWgahxG;OMQ~NH9PY?͂]K 6|P٩ ݂!Չ7Q9;]m _P FM&LM)MǫT0:flևQ:]]bOo2N̊7?o*%B%{l7xNi_c-y&1Ibbmx`m׼_S*>1 ]Ozp袛 W">?߃ |8{{cHT$}϶04wFsR=m"yXANe;dNؒ+M2KJMbLE!6%0%;("D3:9%U$~ޕ0+Tv|fP-2?âتiLd7UT+hcui5n;+6"ځ׋5"VKtLPACFrNMǛI7@' 3X^Z Gz.*noDLpMЖhG׃czzPBʢg|4'd+#gZ{u`*N80"ƁR%Uѡ:p)? 
SJijpiBG34yq<^R0aÜSyZ@}"ײkL1N禙b[b@/ymJ>ELz o?O{(JY٬|<֋z~2ofgEI%}zpl9=Տψ$*$y*\,wr D'.R[,M& );j bW:S(e4h5$7+Jeg3c%'+Tx(}}B9ϕ1]+ .R.be(1}ACԧK 姜~{c2yMC2ִ#"]A?F͗TSCVBWG/ԴF!:[s.X:.\B_T/ȓ6:&T/=K31usOSPrVdMnT.Xݚ륒Niu|xX$Բ5\/[N d1K.AS!6jMb"䘚 ^do5i q/~P,]&KaAJLnjT[fBGT;ЍRi;۠Qyv%@188θ9@qۧ2(de.hUnf B$_$#wwl3@ZڹʫZ֛TD,}Ԅț8!@BjTm\f(wԄ>MI$[{Ԩ"h0;Ƽj!AU?胙Ho!:aqޯ.Av?4ḇrVMPu׊.ﮋq2;je^`4ʒzֻ?eƏ3wn{ $4ע}ȿL+Ylr:{ߴwͤl碉jxrg3GS騮ڹmɂK,\+%5\ f9 GL=0 wz'ӇMoݪSS:/#4NQJ&- 2$!@و@p4G#>)X[b['s٤#h<6f/)pFdV֢M%pv+T4([nRe==MyrymA4sh7 ?_ap4!8% ׽׮ ~2(q;^"A`ڵ㊀D"N |d>G ܘ[rnUmԈNʛuef=JK`NoPԷx&wwO/tKrACfAho,!'>;+( P”3{`3IuX^Bԩ,QFqp-JS$Tݓg XCRA~|&~@~';x&b=rb7>WG6^{~樬q4Rc0Vյ: oZԈ)V-Y]n! |T \NZN<uVC}9r.n[{Xe}_d"W3NODܡՀ[IGg"B'/d)#N[ pj:xHy;Fj 7fUs'Yz009Eak_#R1( VS #.6AiHy< -55qw9垒k# )-d6:K"#Ȝ@ `:w#h>^nV"nvgnmYQ06@ÏQע=:Vu4(fŽ?,?kq>xOo{^q\58鳫5Hh9T?pH2?NVd1]xפI j sU*t: XSn W5`S^2Ora9MSIR^{^B`BX2D{CB}IWٰUaQ8?}q(V5[EL$*wo t%goo 0w=ñ[^hEjX2 K ׳,wxt0jdv$=FXd"qdY\?P'%w,S+)$cت5QⵜKOs.j4SeRUNW|VVƟFF~2>Ê*%%(Ii QJq8T~HذF K[P "DT`C! I R!T0! QE㖺]NethipQ ӎm]k. z%.M qx=K}#D0 Xp^T6ls^R}i듹}гGdK^maMLdz+ .)~7lVr<ef(\q`+)PR_{ ({SI}w.q;)ג#')vd*]F/f099.k >YLtKrz?7BBk:1T \dކ`%6;tM.Kᡓl14W9T ?LK βݶmFusch/tv|Z^Hf.$Ñ[ZoVqSأ>*J2h!d zlmO9_Q (&RB`AF>4گ/ `6DCeP ( [/A^}oKDv[V^? \DKJcc  y;d@=I0Gz}|ǭVIPmVS4Z﬛b9v:4=jCAF|SAQg+ ׀بc}vW\BpiD evU!^ HiӗtgRRN;ZMQ1LBGꐉ:hՄtcm` PXBX|88ޒZΞGK g+YXlVP14Oo@3 x"p˽΁eH@3Z'.oz줺a2']6mW@)8jА43K<Pl ׳wcT@ 8m,BۅD*.#+232 ]Y61dǺAӚ>r(]TxRMĩQ[<`B{  sd]}I|3>-CGf"h[BӬDj}psQeB׻x|A=OUo^hg Xfg8O>/Cw參4J̆6JӰA7~WL6<60yEq؛,}Kǫ1>!5vH! pຫ1:}ޢ?St<^LWiP[)P0Nn<OPadK {j%7E;}X\>tz/6~tzFX#ƍL9 rbK`9׌r6ÛG/h: ļXeMJ'e˶>NLj c?l]*&|H# 8$T >! G kۄwGf>mbbK<8&v}+ŦhNӁ+2bu%j!)hZﶘcWzmLGIgRq,XV>`ng-ٗWQQ/y_{e~_{>2) z*R״2f\"o?K ȁxП& vF*e-gkvSs-.q)Z4CK?㎬,]򏿼>)?}^6}G/g Y{.8UAA )WU? wq9=dž`{o% وBQ$kW>펭[%ݻwdtzج{/uFYOB*J;F݁st|'![qe iJ7Cf>ՐĞ _sQAG? }pU ڢ#)6F#u2P*-,zUo eo=G{;aaS<0HWy쫃 M孚y\ccʏ5󩗤@/?&khxQN[9GԫsTCj NՐhp(fN%,pAUDA\, z?FlW^v;͌$4Ch;^Vs7:HU^gG.Lzs;92d 0 h_>S0 s 0Ta8Z#/{ƍ K/[[{*=l%ukWQaDrM`7C`#I*6__#E +/D@ D"p{\ܙ 4㳱m8HoGuhNjgeF!w=|^^^ˡGk75j( В 9ÂL* ;W8CFi0Cp:_F5C-!r0 …t')@ۮg \BRˀM:Q%mszfT~ j)O~/-2*g+ICa,wGUHX IAr"r Z@> -)rUmiX;pzWShgEDA /DBqt ,@4'H( ƞbrlN>BTA,r|D"׹D B+Π(7D@.tT > HBPB}Ի,  a}DyvvEI$VKVxyp7_S-3B+UIhdn!A'cUT9 r: m׊ffRrU8Lv%`'R[{oUiT6٭\b9lf+ADU(huJ.[+ݚ%:)74h)?4m9v_ ]#3 HħB=o mO]}p\n&023 Ђ}7-{7aa 6HFh[qZzQKUr')Q܍\h%5P5νnQA!ȩ6_lTNza feRctö -THߗ"d9/D^UWS^駂A9cPXI0D Iuʋ(Xݛ1Lj1C938.bz-ٱNx1U, Ӆ%.rOxepϬ1]c>Z1c)`9!9B0dJ ;$=k df6p+EXJzHF3j ak7O0s~(S}IYgc?kw.~}[;~x5'HvOƾYϷm<_;-t֟Y Vf҇ ӍS@._)_?)]R|[YOeJ~d̀DhMw5z$}Df-0?\=1Xc=ΖL͠Ζ:qEm4-#䵸2~=8[D) }դo>Tu!ThZ5C0Z<Ԫr*>m 9*'9ՙVU"SҦ87\GiU Jh~qU% Gks[ R۷i"DuΨւdFoMBĕ<.Fv?^[)# XlC̡2?Y0hBػ29u@8 1 mbA:omL;K)2"z9m@t6(mNFDJ:!ond|S+fߪ'59}z9(a_> 'XP*.7RV+wJ.G"NBqﲼ /QKq*_.QTjoxz 0(g x ۑn4q6ڭ_dSpk-\h-_'XL4#Mi Lbbq)YeJ#暱8{_4Jx._+Zr. b +l~m:\#u@KgC@F^厵\#$.PCu\:/8P sc,,gD<Y^Pȭ1ʸYȔ.00%k;!|z%EVE ʥbamC)N"#WY:z-չlPyy 8iӳ9YK@ih}[Ve YڅfX @pC%`B [[aLAQFcG cg(3q[c2y@0 1D+ 6y9$^Fr=(ϴRqy֭wڛ&Occlr)}B X|>}V{`ZSǑ[v̏ Y x3t>?޺} ߹Uٷx0yM}s1%Nkۃ`ć &$F sw!foTЅWiT f]g{}-8B@` x7̾ոnD gU?z!0L:Q#u=@ d -+?3̫@ gU=R:)5˥{3hqCEϮUy+2$K?TS,}?Y x4|&M1!jur{x@"R^H!.3Ϩ WPHEڤi# nDXҗ[O+B>oFf ^#9$g~lk{Bm:gqԷl"DC^^WVdqp@ dӌ(38&PP#jՒ7BJĴlVt {ͪ`C#n]y~D6>d G-"]CI _v8;<1i#Y+x{(!@*,|g#$Q)5lй<aO||/Ġ ')NB@Z+9j TԓaǑ/QOa d R}ڦ !Cʲ22CXU.il,ZQm1ӦZj !9%kYMT,ǯbl MC7\Rie("; ck)1*b~`S3t j˺^-@J@8=*rU&LBTy#vTZN #4/>DI %ydq4M/3ע,$_-DˡcCi!iP3ALT6NDw-m)z#vb_MrljD'>qO7v0|Q6v+:) z/4r:e:O#+(snmi+; ھ B‑ӾG$Ti#T݃8hă# N xO["p8O3NR(54پ``R#>Li XrN@| H|wE8Tj;}Lˆ!&KޛԽ:J)A5=L:vAt,ػ*iuu'_AiAg8iV~k~OV{[_' IcB/,%BZDk,Pj}t F*M4V ~|(] _x!7Vn<7LO.^g|zrd ͗%CYvފ$})S[E/Ϟ6@%hBsYQ?{WF vPRއ? 
lϾ/34ejERRdIYYjl[G*"ȈUe)hP\xSx7~ ۋ= s44[XD, [X:R\gkY^ƾ|zE7N1XI!T9AsP:gwN;wJ 4lLOsBs/*&C:p^1Kvyr !+@Bq˜V}{Āq2IPNKTNPz*a" ռp!Ae*`&tv)ެIQ R+eiYG)÷Zqw.t0v$j [ʂ79wY)0pP<(*CW$ 4(!aG!D!c2S/qSP$I [( SXk] c g) ;]Jqcr@>>yC#ⲇ Tf)oҧƩzzg`ukV30TD%K?P3;_)p)h| M93E"4Xк#X)Oj)ޕG`O3ԯ=uHzH IS~܄reuZ$#*~IC\ωjKOR:~=Υ۟BAZr-|] &HgE0 U,1>.Bۑ² =Fu&_ aqe̴ ^@ d( \58-&ѝk/VN{wL]qfv=9[}] f3p;'-xf'd()d ⍪SNTHgj\:L49xΙ2S)uEEqL8,j&inXU)dreL* +jGĄ>emPҩ d+"CTF 3D QV8͵9rU>T2 7J"^PC*J( S>҂bTRs>E3IjҚ{ᅋFsbT]ĮIɤG":_hXꛯ3Ԅ||D5i^Mg )*" iF!`Q&98ǰb[숔Ǩ< lxgH,$gbjB1u 5i|}/d]&J.oܿs4SjL;cNae4J;+.|z}Ea%iđyʍ.h3Ԇ(W2 g IQ̹/ẠUξ˲3d5ִ`fIlUsVPE%%Ⲿfjţ"C,E r9X$0Oaq@y-z+$%hP%y0?2KE6 ^WNP+ Ci&mĤzTa0E&<AR 9gLpu)E]ou_?stSDyD-L ƈb½\ܥ6 0 Kc7# RAh'+~NB`;$Hn {O SES"v< 澶|/FwKWu֡U-x jQ#rkeN" ;H܋xtd$*{dK JHfQ>j/-FJ9t(IpP(G":qHKpѡMG$4*[YUzC"6#;!E ExLm$o'`b<2X|%ÂpsA#%FZSGʟ$?[ʏY;cߟ,F-Ԓgc"([ZQ]xh6(#RgoQR1pJgήaUz]Mj %3Slon_o64F҆S0&h|wsBI'sz4 QPe7ψ*ԋO_{R6wܭ=)h=iwu9AUIy篭|&s,ZLk\\^]X WycC7P꫟MFl%n%% nk꧚#sZQ.kwh'Zdjw%"@G[S'U-3y)h&U\/j!FO8{Zq'g4c"jJ~RJ>?)sM& &yY?%jyǝSy\+<)mN1yz!BmM$Nb3;٣t:Flzl:Np5dLPIz*Ij3υ_}YtpStܣ⾣).x輿Nڭ>FUƣPtlM84ZEVZopL1ip-t<` *Mn_L^.i4Ԛh0;@u0I8 fiӃ?I>[}'zq9+' |k5OaSiqHѣ{8Ĕ%qH9g bEP`cNn>*HF C@ ]`8]-4CI ћzq=]ar)j_yX,$w >LlսoSi -W~ bhGS݁Җ"FSeP) Kg A@)C.CXQ9J`7.MRb'4R/$̀V+keH am\:H**qE`IpO!AhCy ME4jpiaFJ8x0c &fkc jQba>C 8Ft`Hk"0#Fr2)0A &G20̝׆FS[r'% x銢ۛ Wrnܗ;]+6gvRY5?>=kQV.#F?!3_|8ޮ=joЛտg.bM\o 69D~ӳ˷RBGV/ϼҞg}"GenEQHsԁ0kl5;!rH2I FP|j9 Ӟ`q8ڈ| x8D=i+l[ٟj D>ןjJ4mށXjٱځ?bhM\4 "hr!3Z w b2z9[S\Ư0?0о!e9è JX0M rHgY*IP;֌Pp%b/V(-U8zPۼjg;ZntaWa+ڛj"h ycF:HrP(@=T[%J#G䜭(*_JP9/5Xq}k(#A WB,TcL JI9]g-VzKE0uz~s7gK7fy73?8'Wf4)dѺ0iw˞b> %̧]L~'`^F.h!n>|J0a,|ڕ様OyB>h%3K뎃vhe(ܞ6^F ˧ of'RBZ].pz.B׈orfWkcWL q.Ԇ~H8* IbCy,Pet&"vy|XTCL ָsHtfJ?<)b4/ IK6zet&va|4^dE)idÃYMH0"]L;C~o+&tr֜" زHmc|zQu&,'F*lK D5^싓鞢;/|/WTQdVE^9<Q krP;sP Kȟ.KIBB*LqV9;tC;Lt2 eU T@Ky$T[aNb95 > FI,BYpij olvC5jhtYY ,+%J=t)yk /祚K~.x3B{$ Th<Ԃ F@$RcR;q T+{յ LZ΁n[`B-"1*4AGF:ULRc mŽAF1LF(2ɞ̶e s jL u9oEeR#BP@!a)(XלZy{-KyF3bq<@r-kP6TP{"FO,LPB\D( %HӀc,Ruo$FaȉQ` {g"̃3F8eU!le)+7)~\ޱyJgbOWR ˻~{h%0 NcoțwiZL7fj455n. \T/ ^{q8Ldy{;\ SvߨB"#Lg$o~]2*N^},]u!& sWe-\ 2I$a,P8v+5Tk~EJh}T3QT&9J:M %01$CyZjA<* ae(œ k M `+fIKda-15P'ოֆh6QBHQ^YnfA2L[i4QJ&Ҙs"Yk5 F fkM41֑)I2 m*ZFITG~fIYmh6b#vˑc@s{my3 Ԑ@sM>4* hAUDJ2u]qiDԯ+r!0)QP`Oıe BGΉPH!3$'ф_D,}jA@¿WlL: \ENc^@gcu<60@89X}/D SJ1.APG/xp7EL2V7P'"\1q/8eVP7}ܮ}gCza;Tv/#8Ì GRBbGj@|[`צ-Iux?7$cg9U<2}vX{*by7hB1m o'™m8v>bp1"݁ "b@,, p` {g:3_}WS:x_Ss˜ ˂П)||LocN`0Ϯ\4Y m+w*dQGngXL@ү W-sˏdX=7nQsG0CzE!p[W]t"4g㿻 nFd#fW?Vi lFdrɑQta(n$c命Ӯ2VvM6<[ɸ~Q) GFe+`#)W; W?F] F"ny*vLte,ꬹT](OLMiʪU ^J`m0_7nָ&л$p=&>ۑ]]ϛ,~iAµ3C=-?h:liA:L?4;\-KD3`cʳ1W:-w2(a،{}CaZ!eKYOj8LjHm:זF?!5Yk3HGɰ]S̶WhJg ɺѽv#=-RJ9ߐ`n,\#XO./i,k2w)ABIv"0`7K*&U;PշCڎT51ީ1(Iwg<le8~0x3T#Yp5`!9$oaہo8i@'tԍ:hK沵a5@!‚E{$PL(pPx]@}7߽^fv8zSQ7L\"ZDxqĽNYN18qjlFkY5`Crk/f32 ˭ob0׌|'dyfdN%Nצ2b|W*v,}J(RJy& L.g¤5(|,n`/y5!PEl>YReY|ˁ 3Jy_+]ֿjؿU[%&KP-!]-bsL ?O,ke F0Q+M@#6sC[T0l_&;٭Rj,_/mkxs'÷{yc93UOj ϲ,uހ]]CLr[nԜ{TRKvV|OV5y' uA(&cW 5Yۖ |vg & MǭLk"m5=6}r'=sOV r}vMz=5rvj$@-\wFv2Q,^Z^Y8n9wc>&Yg:1R^PQl̚KNғ\L@iIsB qsNfD}{jQʱ0@$ dW7]Wlnr|3m èdcwdc.lBkqb _dyUU'=7 2!Ո,myFwa bƝMmD3`肑O8z|oVWX#;26YGpO '7 I np=C{;%3 ?\}.Bo¬_t j` q'R`ajVhL̮5Qګ [mJ1eg,ԑzh'i֐9KG{ mtLF UnQꝾw.h g+T1n<PO)%/ k8ƝV/u 9nӽw4|jv1,v;kLNNNbHZ[B*N%ƨbe5^ CWmRNI֨eqr)5 AӜ)&mJ0.0Ź{5\=kWs}>&o8V[)^9^\ 9Hf2G%NpQ"g()T`lWP8)U8:o3Gp&24Xji9GlhᱶiIJ #v mTsy .$jpƉf Tpd#U@"7U ;|J)AFz)l~YE(%[ YBP%<{8D^x `eZ#-5 0),GRe\x)+0 ĭ VI;)|)4EJw|G1Kct!ʐW?.K$T)%K*\$J;MO 9lܐ/N2b;/: XSK`GLa4tKEfeMiLƮc OFC>(SJ~l8l7XÉ4(ڔ{,h!_J u-E4| kvۏ.CPpsIZ"*P'[7(vδFiD ƨœcg"pU!Dg 5M*g+=o!ZJA;)MAVjEd|_,wUy8byU[;2 ^AxݗSgx />O]yKKvuizp0k|ƻveܒs{uVqcwj탆Yq=һM};gh=.Ş'<=}? 
y cuzowy]CA;%5yj\j;~LObZnja(nG5UYtj&imWYeC 7=?<'~z`'{32.C y?Uh]w~gM htu?pyuݲ14H n N!19hxNF›DzE Oa$zjѾsx:@ SufttF66#g ۷1_o{軽+!ڻ @e$b:134y*b//b拿Yf>%sQR Ֆ~.rvPb[3WIbAL*Rx>˕Z)v4bM,;1jQd,qd4>4w v;Oa$v[Q "^\T%|Bh҈%# N"ևZOX {md4}SjA=F>8ꙏF.1J]!]ya09T[7z鄶 ).IcO20m t>" Q2(RjtFR 5b2CA?y=!J|kځOf&|e?<=)7'daBu0e)rߵF: &y6zk;SEj%Ԇ6Ւ$ [+;e K"ƥ%/;Hcqﱠ|?@#FsA<Aq[gP)%B9S )R)kJ#aU&@dl1Z1fTqJ,ZKʳBp-vy z!VpPy ($8vDP L/3\#c{#a=nOgV  [  vB"/<)X  Ric f1VbK՞rBQ*!.-p`P 7VT !Є [6g`}ecᲤJBRVBjpH:ij-x h˞AP%2Bjq\:XEk;,2"`"3A7@5(~ni,;D1_8O|E7qotd3އ}a7~ m,D8 :8QK>'~ёe~6YcUX1(gWA_,ڧhi;p v>ٯZKND A X!,R20EQB(s 8ZH%7rLƹ:$ͩy#5Is|^'mv>z$n:%x-\pƞ`l  M,s?qgqsWxzj-.Vm\>۸Xey5s5FgCtyqlb:<.);!4.g9b;UgkN+.s`qq1/9Aθ0w.4.m\ s}\LcN HPixIU2k`R)( '^q\L;۸dC{(yb"Dʳ>߸>۸fӼkq15;i\.T9_n_!YdظZcTit$ȉ8 ` \Ʋ  S1VeU訶ut嵧ɰ۝[djʁrjjlaAPoT=N 4AavF8r+ʤV$əSޔTc!xٚrjB~KUYK)gI &Z1=L I5A)<:S a)o &0D(]2tƜT;cfHk61Og)5#.Fwy:cN ѝ13,Θ3椚 FGϘMgsJM ѝ13zd513wm4eyiEgkd#3Q1bnuSR"Dc#^nT:U* ̈[y0;9نcn9}zEJ cn9z8f*gs1cw<% s1y -r{&qLl13mi9zzl9:c*8f:=# -Up̸̜ [y  O1sMH1>=AU[y8=Y(IZ J%[!O5mJv1'[n'( ɜ^Iֳf 0[]|6 f9gLIktFcI WyzÁ^݃{u[ћIoC(F_~}Nj_Tu &4_@diH#s9ˍ.Fb5S,]|k-nj[aM g %0")̤H\a$/^&` #o>ݏ1%RӋw |痃'yzE[>)OnSZ}{٫A+0l X3)0< gn}i7Ci 8'8*s!L4p=҅'.eRS7)2 Zq@Ut)HQG#1 >4bi8A׺!xk(aFȸ78oJ`GJDz`C/ w'* d!HGIqT9%!NK)d=DJA(+, nE^8 2!k8hi'\ZB/M:S{̙`8&MEe`8]>&D-q &|fHC5>}f!9AE""0KaUZ2c:a+hdY:Ճ./ BݫWl>Y\8@o~OH\>$ѼyY]Ɵ:7OFb0,$iW Ɔ2f?2A,Rd)Jx)]K= q*{ҸKHorh# R򠱉ܕW~>sL4#D#0u֔{|2>L3]7[4$ W WJŬ&d rY &?[6lBl2[[,RoE]ުeb+҅(ĿǤO>hv`7&|P6V?Q{}cڥp0F$Q0pX@ 317",O?dY:UAeq ׫L.?u}g'u 1>|!HkߦWpLZI[}[=RypMF%^2[CJeL^tmGG%5ltͷ`::+ݱ.5lLJ^uJ(-݃1'{n¼ۼ5{ ]i߬ժuG5O12u_hwHOcPGmQ0YS@nȸ; !rZ|*H" *OuE*)k-"e6ͱ& Be gS6dſз5yg\@ ^!NK\p{<}KJ5Өٖ~b_s13H/FQ(-٦uyuuWcbQ7?|sYR rNyb_ {j4=l9yut__`n0/VAr8Qyy7C[gh拫Uue9I %77g@% } ]\W(}&|T7G[DUWz52;I(*I>l.RcJɳbMjd%as`JX嵋V(:(* 026$jļ7:z9L_qvҚjm=8j3d9/ayhMVLC~vq. 
MEArȸKܛ0O.f櫲cj\(}eI$'q1LRdSY&t8|`DvgJjջ fLf-4}EmrYy ߅I>o2Dʌ5%vuѼ.6MXw*n>#H*gȫRs8 4nĹw6>y%/a.hC5k 5uAsSDl#p Nw<jB0SLT^ʲh2<^j,{;9wh@=nYIUmڌq֙1i ΃CN:NS+3kFK KM(Xݙt5+Q&̟KBWDzy]$ԚT_9r_B2EV: #3AVzU *w^}h1oOƸ閷bKƭ:;B [76] (}w#Se-]Vs\H +%KߎGI2W6\2|0ȏ^9=gp)[֡+-43{&6EߏY?/aV4R3µf\ZH^]ys7{=\ˏ4mV> peUIytWC$MU½=Bb^ów XC[>f13kTkԚLJz^stU_~ή۽WTy :Z{p`и1O:>8BbN3|n`UA_OV\WM,D|mRq4gS.HT{;ޏG!xO.z'U-P6hF EcA+W3o)}uظNp*>>]bS{S&Z$XSa4]|`q.Q9RV9t(D&/y5?]W <3/rg^=UR_?緵to^!x>P6:OSm(>&G*'3rw)#jRe?k15HdTJu5[IiL$oĘVC4Y[ǐ1h2Da=ՁDSu2+KҜj ByNPx"Bxu2 r9k-Xcg%_0w28`@/2Xj[ !<`_ #Q̕r$gB\ys'FS;OJ6VՕ#CJ}@Mm.Ѯ0&XVʴƭBݔLFV#^E'M{0s_WOOOٯZdu [ƃži:XR 氍v(6NМJ 15o ПgYY-1MuZ؟5}Yد1`\X`2,e8_hS ҄k edNXn8Dhb%e(p bM V'Av@A[6P XP@8=7MaTg>Q8O#c6K1j$l1'KJݓjWC6|j+Rwā2(Z 9nhh<=.05ƶS=`VYS9`Ic,ۡ3wE zuP H>yuɁ a=Z}*Uo*UmVッuC|$ >8JVik^=lW݂7{;hB΀3:sܪˣx U005tkuq0p Z=޾1䧲8mkA af^EXCqCDGuHN $PvFG.EI}ȓ렺(gҥK%rɿ"l*herAA`7ٶ/Xr2A[ՒZ#['bcY&Ufxm(g`{G9oMLAg6!k9V ̎!۫1[u`" AA\+ h˵ 56i(-qK,%$]ճ|ܕi"fX\]٭Ύ.G{23o,X٭N;u< ȯR^ cnAo /N_qDw[ԋ4}1j藏麠9P:·=jZ|>N&;DW [^uYw`׵l0^}M#r軣ܠ賧 &q2}ǘS@yQ*Q6T"˙R @c3ڋZ(e#$ =d p!'LEoͅx7`6dSW~dɦM/#^F6Z8+0s%Y-`9kt7X m ]X,V d+Q#]7?Ъt^HxoWD{%okd- Sݶ(ng1Lgqwd1Ԯ̻}X1I󷪑8Ӈ׾Do7x+xi&Oٻo'z6{8?Œ_x~:6{ԬxZ'/c4嫕&eOw]קyT㕕H[,lz#>Gea: X({:bUM^h]c*_~ħ-b"p d.f P噟|da8Q1.Dj;~tDkpLh@L[ȯ'3TF[2lq_nj;q1,T| /z6 ZU8^`I)5l&(ժʇEV0GC2%α/̱m{vI)u;nSnkZKtf r(3')[u&vRvIQ"7Hr-OrK>]O3BB,+4Jg.̀%Xj}&$h6)K'Ѧ/-$/8MvIh_MOlYim7|Goc=J"̎=0#ׁ.L}7K p^^梜 ؇ZaOVI*t2AnHfb z=WH7@%3=u Bm IP}F¡`J$`'8̀(Jc:o몎$YAFtWS.>⑳sicUuxpO0;*2qCng[m2gyUAUV.-E.gx2 HO>Q p`ꜢSG314uNt0tGL gߡ0txTV*fDȔK}eޕAݧ7j6k|g fFWV>_ܹ4 \tiD{|dLխn%H{=VCWp9*[!1-?| @uI{]/~&ZH#6+vYsyVپ~4Uܝ."9[,NwwnR},Uzr7^Y]Ym]ˋӴ[@Lfk$o4l˕?WU ӭM]loo74W-`&Ô nhZq6ˏ*F9}*?jDU,P ٢-q1nfcv;V6*ǜ1#8e˛ tiM;_'F!1\\zJd93e?Z.|10{|l,mpKװLRÑʈO~ >I]Qy2;RENԟnP?%>5ͱ1^߯9I#OdkPOO1f` д^aT(Ў fNUSEޟAtPg:wDޥUd,)ъɁqV[Bz>p'+5\) ؔ1saM2>뽌z/zc0TAJd\6GZqL\c"  1]7434*f+踙 _tR©ӞmH P־{te e/e.u-˺UWƠ(Cs_HB QJC6xnDBbs( DZ&+uR]7?]3^Q:Ȇ yI'y4WI p/m."~3cd󢀜9T[<`p\YG_J#ʗ2x%Eǽbrr KV3򂋂 2&L۫1y`$Ar8驯c2U &:m0aXFei^F{2ڣۣu.g~0tdOFjG G^߾ٯ`T{s-kޅg"Nf%AכfmZntEr(7x%ɏ߽wVd3WFuG66ClJ\y?W7*kKݗM.ϕRxr(r4LR6pHi4!]B mȠ+I>C((cr$Ϫǻİ=Jڳy1yO.vc}M ٧7;P |Wj;)SQ+RԒe﵃B0oyr_7 뱠އX\OݐPoYs(^TX:$Mo~|lWݙ6` mz%*6)o2WÛV emBzǥA22C"tGhw$8ŐD Y#I! X\V@|BueIY Zv\AR G'p%Z"0>t{T@-^zNӲq&Y$t!H.`Rӏ0u@c xk!PZR+0%CR_J:>E 70dZ>]6ɱQݜ5E^[Bo&5qQ(wdﷳbd \N!CS~$%'Qʭ0$녱rۆ5XI`;0E1oC7sdhH8 f`øi!6Ǒ1$9ǑJvxq8P82En`2-4nڦ51qx&éDSc^9JFR9s3$)($ȍT,b2','P5-dO쨎k~)M"IH2)f9~vuWѭh%p7$ ow4g.}|Y?sL/z9vhtC,ډj=^?%x ҭ+RXm Z[烦J>8O΢<<c.حT弬&udo0<>\W#/bUX71nvWx~{/>ѡއj!3GNy3@+IP0hs1) :yZ-YvN LZ3ȶf!9f#O1ӑO!$֠$G`bdfk9CW֒ѵHJ.[?3֚LmBmӚY6;ġHDX2}}٪!Z<{}vvk+VIP%­1k့t9[/6fYr?{Wȍ/E+|) 0,peq _`4؉=Zr2E%n$[G`&ZlSbU՝i0-[]/ݨKT*LO((J}G|J|*gs>G" EvΜ&d80S/ W,&T KM)8 ԩ)%5UhUe\91v&#Ow%b<@ach):0CȠv!q\!rGӰYP#ԩc@jbٽ#(l3s 7plˀ.T}u!ͥV뿣iKQO;f `R}rLl !L#m_֐OzxyRΈ!|^Gl{JMn{xzy==(ձ)¦qqãqZ潷'F* U0Ϭ} dl {GYSNAZ;'ӧFi}y%JCٷ,\ά+Y@,z˄‚(iKQRFR%x\!R ߕ 35ԍ)@!lvx~IJY臦26]gJ9Vx_xǸD#D;ϿR?Pm&ƇzV5f _o~tӆxe+#ZC•x AkBUeR<}O ]~g$ ƌ3kCX3܄6p8'֯" ~qr'i=$ɖ_{{'(d2 pU^y/ DZOkiagw4(ópcc@Eq-@w%@w{}C&"0\ܛMiRcO]>wߤjt[;I=\bf]0O47h]z@; AfG]8ok&r~6X1Olq78-8f |m,LeG 6K~YQּ >n$]{LHyx[Ɔ9FBF#y{[8F/ۓoVyq1,qGFgZ݇aX&q0l͊ʷ'GgEÆbZrѹy}fpf*.W>v2xoF nIfƇdtFrZzm.U\֥kJ%M?\ԈrļwlbSqoS[?E }]E }]C_m0Vrxk,WNy QʘmǴtx#@5HsF#;ۘϛ&W@>Ll>݌{yjB3u-gP A"[#ckEދ.X9 tPXv HQ!aPRƎg"q؊D^*1q A`LC Ϩjmj+hrp'5XzhaZE]Uԥ? 
#Vڿ.[~%WɲwS92EJy'J Є'O;KyJAmgRDsuwhPd<^&`4 0e qA^rshH9CPܪ&uDˏ<Й5ٺ,܎x>?kÿSt<<ޟ߹+u4ߟoQ&n:>\7h3/`]ס-T@K(}b摠wqOMc:;9Z4&"1E͐'^w<4Q慞P[W,VOxr@+@zZ!_MIʃ+̝J[GZZ/h~^iaK Uc!k:t2ɵ0u<{G!/ FVϖE1P*+_1!=\oZMG?s/ۛ!ϟ .яuU a9ie_9 ("s(^@wm302K"st_bVI9<䕘<\BY $)C&rxTFIL,x"UaW9JZUkC?ƗoJyyA7Ν=QgqNFI8_7Obw|5E9y3 /Nt8 &F8uRjZff+ -w7,Ϙ n6tR0G6WYt8wp_0ZA$"9/'98-l@lKx}_Н2M2b C36cGIRF9jR]Z <)IY5X/SMїGk;Lyivz׾?'m֨i-x IL?o5&2 Si'%2Zv4 TLU_ (g'NyC4OWq$7X: B!Uԝ~>F\.x"JEƋ4[<:5j5UNZGb5JDi^xZsfE, OnR./لs$)M,\ޫٛ):tZ) Ш(UhS{) ͘5D Cwh[3pA:RZ',:(om^V$B+&\H&ۅkfzG[qYYk5h#VV!DP FM `+fa}v|Yxp WE}r_Ĭ_" @۹Z\>}<,>W4gQsřO~`{|At>|W77wA}Ih%9ArMAMr̟}v/Il3B)Ϯ ёJ! ʠPS~~v>}¡"YC`FRúé (D s0eO c⫤DE% 7҆8k@7T)^` O5P/9dmhM󚹀[#Zh3Pu Ŏ&p$t4T8,sr! /Bhe. Oi=#PdŚhh)ExGCe&+JQy݂vӘW%֤[%Cҍa3O 5?eBh xC Gc"1j J_^އ=k K4upbe9^=2jvEi< J +jnnUkGV҄[іԈa͙ۏsͤ6a`pw/cF "Q[%Sb Z*;N*l/~2ҍlUM'}SثVѰ~?|>(JBI/eDJE*5 F"̡n)N'AGpʲˍc$(jj(q(h#`NX⩧sY{C>{4HnWmb4(P*1xQP0bՊ[-Z: bXF4Z(˂5k`LD%y0:踾wkOYx&HlA\ ymă`0򉐸qáZ1ەZ@Pj/#HFT!& ,QϕDKm R*C{ +2 8J`4Mv \ѡCc>GA֍J>aInc s}m;É Dk|TZ cDwXV8f\uqH4a~s].çK__.@E 7>Z^tCAa *:D> _U(Z/@M~f-7 kf+8YB+TZ39NQK)8 V̘6)q%E͇X< x& ]? Lq9EK)JSO >w77ir>ݕs,9ma'knrĪ<yT֜P*0LU <]ց4{I;xb)/ٞ9#{ ҽ:<Ny]SW2M EM4lS=ŏv01=-DH½3kQ z7n4WF~5ˑn}-ᄈ55G+ҔObw~1l: zJ M=ݎWnW-x1o#Z@sJ:Avj v]_ 9_./Zt2LLǰ*a=EWN1ݣ6/tWx͹ 6CoUhs9y]e9Z:z P8~BG9$z6vsfjf4l4"[-$&="qfo"pD ~<*W@<^*zwwqV(ëD@ BPWN(>$7 %x=A7IPT-ˁ Vm:alT-xDoM-ulG5K3P| +iL{tU3z7-0ڂA+V0Cm[-R+H b$l *(ZL%g6KlYڒUn3tSN:t2N_=F򵔪cE]!IRfP.Xץώ L qơH1~$s/Q}!98)~Da+x;K:92d$-3~@yd6c/#-wtpL[%}w75QEoykX$c]pb׷K.mۄ-XrHwjI0R+| #U!&w^7Rq+ǐs!Ipp%õMY\S^yяڨ5vhQnĚб}43DХU{Lj@^<}X < u|Dx0goQ=eweb!ƺ(N9_GT%l!F7iĊ bZ \\7cT_Mc:`910_URDI.WߴKapC1ZkCXfhP\lVaߟo;,Vw/Ϧm͙wr0ޫ:ْ1a7Z`,i/wƮ+U3"iO?7Z7' ,@X#UL(8ԒxM4\LA*$\OSN:e'YJ/R& AMKA%4+Tf}vPIA2*$h""(bKIQ& McNBVù#'Tw_+}oPjq:SCd>掺DR  p9 c1w|9;i%ITDd1 FH&hRYNPY¹@HMXIP>sߖv\[/q#{`vEJKIT=g;iy' >͖jjjp\*n/gLLLL(3qzTE1 j3 "Gp 6MKAic@'\UkÅ1@5;>#M›4,%Ag=Nx>X< 3{˸& C㇯H(| (bݷa"lFBݰ}uj0\lOՓ]-Ϙ*(jl9Aq8u*\ACKugzQFre1^IK _zKRKH|xǻ3~bCVnUAo77.?zxix3yˈOԛz+pK -MՆʹWxsTZdK}`oE}mu;*WJ)6C8.N1/8qП_i_oG!#M 0(jb[Y iPq4@L"Ui j+Z.͙ʅsq/?y~ף_'R)z0rr%iyJS@$˂Zn|dJ RSSFaHV~ F @9#x8A'dG"- Q!hAYHmbF_5q5L͛fhI7o؀G_;yF {⤤B'"[5<>}uG;ķ~nbU tAyK@k Cp$Г!iU-GFhi r ڑh&d 8 "Rbz@SX# LIY2y: 51$K05Fx Mr/oB5rq6u1msd;3ݤ#ُPӫ{ JK:^R܏}+h!F+9>?)]p_[dxd\f}BX w㩰|(r"ޝ৴_9+~߹Mn-O.2y2P37->k 0wguzaZmPRW2R#Q&{eVL^ב=;ps&I(z $eL2q!B&#Aelm߷HR4I]mKVIE=Cа1VQ  ,$f\z"nO83˹`k S}l^''(@YD%q$yGm !M ݒr~g`ŪF v"7P` ʩԵS)tϼ\UZKd)_ٱ4 7x\D`,ElD+M&Hi50C0JDS#Jʼ4UUd`YʡdXfxZp0Ak%hȹ.YIR@h"A卖1(01ÕЭLESdЗ`Aq>: %`t7(q, shKF0%b(D\"Ge2Xhb( pJ.BFO33P$D"ۚ9T[ħDrC{Ubw 1/Qgw|4;TUDq#殄B$Ї Y՗R(A1I M"NAxSRqL p,Q,:EኣfEB-~r& b=IEyyS =j0B1bmn Dt#18\df I`dSj4FK5ve䰪JDnIl@a1YNJeZcк|JՔVX3(SUZGI籊21PBU3 IטS[̇hƗ_11jl.u I(&.zƼͬ(*&n9;'y)9UM l?.|Wn,θۜ1Z#]r@z~=W>*7bo0}Csr<=;=3>0_p?J}7 _5YJ&.E{%c1>ji^ޝpfr#df"W8ge?o9 M}˨ѤRk6yJj<پCΜ5+R°U`[ۀ73TRH&U$ 4{v_&CT$W lI?_'oހ&"\bZ3.I7*9$\]~ZʮbHCRT>\jС>OYA*op;KZzـE򅡐e#^\녚9޷#*&{?ts]T6tv_,_>/iĒQCRrԾYԈ)zLig}-s 3 >եAvSU#5Tk,0>|O6Zn*{~\leSM?'۟Y2k%ZIt%RA ~6嬌aC(K$>#n9)L};vwSJ̽%JvJy0@yG<l+N$Ɋ-"+*%&9l,K[:@Q\Qhg  d׆@*RvS.gUBƘiUM@z=U l4 Zý2f܍F2 QMi5)Y.5Pj2(}FYnAO(’*`qE}f׃z331V1le$3gS\ r ɝ57HhJ| ! 
aD9eIj%n QZ$s<27jA̍Ƙ ^i>`DR1]chP1^Ή.]4R!5R-wֆq!Bdw/w:@PA(d5PڐlY~ڇ޳6qWPyJl%/Nfgg%&";~= XҰU .v5==7n!ƏdBvWOb `[*mԉmt>:W=cZ5x̢cFZFrPfT }'C P_pBS:}w(f/v}xyR(K=46M$#Z [ʌ[ĜxDfއ|/\9 SR aiZ9UE AMq,!0xtQnp^|TbnJrF $"Uy`,ޞ|#ٰ1c?k<~'uAy*Fl-p4N܁jӨ^ N~Iʧ ^D*U}L}9=.-KAPQZю;ٓMYߖ=2%:Օ4ޕB*@UPbe- K7 ^b̙If.u.xι~W]9'iBvg]U'2kw颶뵻 @ 굻آtfuj@k)=86mi3]NMڝi<6L`SE eAӹ]|:F?3莆NH`+Y|a<%%7#r_>ln3Ai7Gx`uܐ Zyhqq+Yi4՘RQiQjJe(\Xt;]}$mM9kqV3HlO7m/_R xykdz{+>KKv0ge!5rQ(NGh!kzf X\lfA`=UPDŽ6J o  (O.*BAXfFfZ07m̀yo03Ə8SJJ+b+SH%o_zSٵ|`4`p{]^;&dcr$P H}2ҢPV+Mt`cSVBAP=k40ab欚ebiRY۞?XN,qfP*rxցeDsRqT}d!FRO_6Y4S 23f޾¨g!aTҨmawccKR{.BUˢz+ӊʘaZPDn\؝&FơL-Ĥ&",VJ9Ț0#t$R_n(7Τ! ),/Mf%8Ee_Fģwj)i.|5$BבYQ4@ ;z]{9ڳ*bTh+tȚԒQ(DPdK?ڇʱm5{iGK/Y+u&\a1:]FEpJyBUTCvCjǨ'a6Or&o?TO|'S+': gNd wf"̼D;C;U25 +h JkV=J8h=!."Vzz;Ȭ#P8dJ~{˛͇):Miʅ7Mvf+/IF\ gpҟNn ǻfӞ5^ yㆪϬi Įy~Z0'rGe#G8+6,QDn3Ѐh>uwpP>C™l?߅.5o`RhB`v{uÏ".b[͔Ƌ$I/4^4 a~y3CeDچ*u *MKQ*Q8CPE0\|~S{*iN.f}Og$֫nQ9Y|E}0,{/YxA-Q^覬a|1VERZؠIP!轨PbTF&W 6C((`_ CA-UweuˇdiUFn;G;V*+ƙw; ͝ucxj2-ݡsxeN7_k6fN+2 nsM4@°N.Di_h(E(>*(@N)bZ8V Pjns36܃uR>Jrl[42bQ/- _/yuG Au^~ͱJ+cJp5tZj<ƁȄ`ϽcC|zÚm] ewZ>ZSV$ ҋ7Ы_ppۘ[M-jMMe&m ɳpᓟߗ7'K'DĚPK±D6 1c;J[2,R6FSN [T1ڒ "LmS؈4EZBz"mAF+ iKK_/N{-V$P8$ukrEɧzE}کZZSka(-0T}L~;@NP u{լdXFoV*W)WD T*YeQ,4E4FPHs'VܻьTRZhwjfHtg?rӅc9VhQ[*XZ@o%gJ_@Ե%Zj_EmR$FBfSTԥ94s~%s%J?^^_7b-5QTi=Sߒz-KA2%L;2MKzwܚLclrg/>Fܝ(>QxϿn55*aɰlxw5z}`&]}~yzfU^~|/˿dw:aw)zWʸ4Nh2.SQtegwW?mGd '`? S{2GE:/ґ~ެ`tIC%b\ |LUQ,6إib0B*>.YQi$FmI_ٗێVzӳ;5O;2:—Aơ[qS*q7{!Nj 9\=[5ov}deKOߦWaͭ㴻,98 V.߸v)a# dj%ڵ_/̚ !f bhO67Wѧ=fCʸ{pR^Ao_篯.wKNT P[Ug(Ř"`TV]MPY2 m!UTm|"I>% YhwݝSѣe au,4> -XjS)i-Xf%N| 4,DnZL,`+ Mc"hv'%5mwu}Y(d ʙDܻ[]ŵ(5v܂:42j<ԻTGgdmSaoH P&lCjhAC2ʱC2sZ`GF<صuhmеmOlZ|uj4/ YU>&#K@;ޗZN:h- TtWi$˧` F҂%$_E23k@Œ՚*gD*%#8 @W-:PRQ͢(2*蜤rQTʏ!n ! >}n'{lO'ktWak $N/>Ə) ˼rw?|H|v䏄KxAY3~M A}Yr`vDKVA^LGeJ$"ce@>SD3T++HibP96ocqFMwGFLҰݗ#N0l25R=&&W.ji:LC<{)<0B|x(I䴰<P&۷$s][RZ-.>-g1F5-g&v ɾ+1B3;1 *o-a J -h+ir!'l%FZS岶57-HK^`E ^Xg}u>țDXC9q Bap҆l@Nujo _A޼ĥsI5WWFW+MJjCAU-! ,#R=h ,"գW|%IQ4Zi%ڮFdz *PiQ8;H[(.#5G\V[-O4goMm䫋]P,+}'鯸qɳ[~ml$t07׻̤~d>;Q&`nGGrnϿ.NӘ(7z1,:Z<_ON%p K=QUftbkfhX@h'.\$ߙTlеZ?m:m:w1}Xy k/C߼76{ƀ3[%Ziݺ+Kt/{rueڃc>}1_@⤩ìs?c韔L 磓f ;i s-_7#_9Y|f_紫qofSL]N3 +iآ>ltޔ@<+CvmoS^{8,XkfFަ5YNzgNQT:a_1[5eÛ<,ofn6fk\Xb.8A)_bLa zxkJ)cC] [KjR9k/$Ѐ6t@rvf|V@˽k ^)B V2hU9i4RU`QxTU銂 7.0 e(4Zל߱'Ι['\2N=8zձ AsfQ80z"@hZ}3:^2St~!Rg*^)Q>8 eڥ}+ b qq|ILQ60?"̻5:Fz{aUP:I% RD7ȼSs ݱ#!Pȷ5GA`Q( Rrsӡ)6sSȖ">hN>] oT2E а&p}0N l()(#Zr- ,μ<׋B\."Sf,DMiGpq!B0t7RB!ch>A1p2"[#uyҨ uj7T9%gd4}/ ~w- QW2gfԋڵO9C9OދH sLB0m4CzxQ{Opcv$LO-꧗ mpa@RqMǺ@Q ۫Q&0 EA FCs=BfyΈeDfNd;/&& &3"jeh'BV_q=*!Lj)ihoS 9Runyq7@TE/H KO]|q5{r4 Bۈ v22Ņ͝s n4*muy1hC[f AYY j*XNA9<(nCz.ܒglq}7@ nLC?@BE8^8xUzPz^bBg |j"8\0IS|_7/w_twxJkQ:V&8ru'7OMv?zڊi[&O=v?(PYm`]_fr L˛/{%쟀c9m1 h.Ͼ|]ЯsJoh|:gO+}OR %gE[64>KǞR\.ļD i.?dKQ m^'A'nBlV;Poo*ߴpdPq7cv@(txy]i^A7v $kd}O/Cv̞Q+2Br 76J~ʾ]׼q['>>gyˆ,?Z)eQ\{I\BXTšݡ[#g뷫ۏk8mjE|҃U9,󵭝S;O;$'LH,Vc ٲ|B?㥦J {. Ы$mcқ0~ZrO'W& 8Z0ę#x'YCrx!wx2Lޔ[2K%ꫵxa3m' )]:X1v:bp 6jՇmusbEDݹc\m1OMK7etZY>vpz <|T ;;&4^I {׽z[Q+^pm9k]x:24@[?N1p\,ƹ9c Xir[iQqF"_&Z(@SZ.X 0B3l'yxPK8k#y}Xy@sn0|ݤXV0tl5,{xXs̘yx{?^PUD!U9@ `-:>F&̦oe^Rߴ4r*g.nnIUŇهֲT<00k,P%(IAz[%hч8Ep菀'+OkmsLq# n,nɽhErFrrpFKl݌$,oocùYE]c@h14f2]+lHbc 4fU ii;;!w&>A ^$5+mS1niZ9Ǝw~SfoQnK0H0^< fA5)|VJV +\VK|v1aӡ7e^E>}r5:uG\ByS="Ҧ;_`>mM`68ҳIT0`E@*dQ|*n A ӽ/>\ )'~1j1u!Ak*(Qj8᭥!4i<Ǝ7衸זY-wVstnoW#JjL;vrihNk/pؠ@E!t /Wu>lBclHw CqA)Zx6|d@݀i?D`ߴ."JQ b:J5bfozit|'>թ^^a3ĬMV~пS>:Q0 QJ++lq$;co(u\'JQ 鰙WsH#}j{N`N~L5WG2mhQ)hbJx ª(`4k^bRՙH`727.83Fe+hG=MwT7T9xk-8`t/Ql.Wx'`SST Ou(7t4F4SI]/v>^QɟpebVTVmlEksm?+.!> mS֤Tq'4dmZ(,?m|g/? 
U:y{pn/eᝬብBA-$f BG˜p6xnzØ!swwT$ '^3=>(g士sm_/>`н] cY랞?+\ũӭ _^O1٧rOo^GqJzY;Co{caxw:蟟B0?_gp_N(uv5]W>yh,;S?١ `k<Wq;eLa?7bON+:2[8SZQ܌+ۭ3Gi&6Ь T9z`@ 6n1M@aYo6QFfcB.8.ٸ71*h`+()VkYD`SA0ʽ f…HctAcH+\J11&|m$H(꬯6FHx8#aYHvy/52$Q)>y #i D$cp}ŚA")pvD h*Lc 5 lxeX6:پ6ך&

Y R4% $X8%N(y yM:pCMfn4sjGM2 pUAG=Ӎnݪӌ;}w/V wC(&h ,h &Q*h)מRՔjʈX)8PlxbbYN7 f%*n9 WJ\LK(b$21qZ4BYfAFpdsdI#@f?~.(禺֛7o6Jn[J}}T"MCdR\TԮ*&-Nz&hپn gӢ6kdѠ^!gRD1aJM4 ̄06:pDXXw$`qFt܋Qp ._l!70gfA}6q ,d U l4{ \HCp8e,2͂BD 7:^Ir:e8;MWr*U UU!oOk ]AWO*&\1(WLFɕak),T>~IL^ hB@"  "520߿ +Xޝ]4P* c6ε5_C.Y|ʼ8Dy;ymȣ3_\Ә2 \W+a+jZ*R[<7bQ-ʽ*x'8|(i*Wִ|F4%apjw՜!qCsi6Uy!&L)(61&RCMxs=4-OSR204<7AMEN|" _K{E&-͢[H@5Ӈ>h7x^f!p`9G*^;z,HX߰-j2#^MRx AqiZFB0R]]oǎ+v/"YEƻX E}rdɖ;bF-ϴxd8>zӬsH6k/<r#$v2A_3%hI[ oiѹ7p{&9Uw] ~б{6ͷs` ^Ƣ'Xd3wy7|n ?nl7g [co-L-7Zqx"i p '{v1k<,\@ HfDA书8œ|:|@gdS&9ͦ-!lPۇcɆ9 @=&9ӳ\?Z>u~rVE]-<6mb.2[_`t3z עE8c=I]>Y勿{{Jo'8ǽ~Տw__> =9]}[!r 1D*!x%!(;& K(L~%8ɀB|6ѼѼh_?d杏杏杏{3(_.E>ʺ!xX63v|7]<%i{qKZ+(`!I XԒ00RI kRi+d!,ݍqfq;$nO,ngq;Y1etQHMP0WZʇa2%67TSأ|voi5ܽYrlŻku.zN=x{񤭎AKT9(j}%6R>RRO} U n(4so>?e駩/}o%7_Q&-sf/ÛwÛQ}Sjn?`0i!4))߽/g_o*-}CSJ >AJSΟbP8rUl8cPa@淲P@2άf 4f>ͳ5Ь߬~O,YL%ef.?T|`m32$fHjN:OB'O E%Yoh(|N73v^_Әizc/*c(,1/yʆnـL?ـJ}X#GzĽSg8pXɻt eNc^WO5P}I " Ĕ@&+9B-l-Ke3\,eϩXMy\,esl.Ų+RDRa.K_G&e lZrQ:n {Ae$*eKSk8|lS~!â-o9x>I%P(Bq$ O[P=&w0KawkB`kŰBXH&13RV6:Q '}']T2;q ;\#ϩ8r~\#sqd.őG|6;%ƋOeMz-u,CC`NF!섾{DSx3Ov-ݳѣ u^g$?:?_ӫTPU݈F;gɯ/N|%R_׏G?9ΨKd@C-j)d1ѿg|bNE` Fխg n5;c3x hSp^¾e?j_B}_բ*&oH~|y|UUm/҉>R|Yr%ؘ;4 -S#dׁMmŲ .EzĪRVs6ldbT=o{lfe5ܠIP KPMQd $*(c%PT\}J0&%/Ԩ&dWP0nkDEBdSo$ ˯UYw! <>m9(u! ťA4-IZ+c[%4?7U[6X#N"ݯ fWв \Vq3{e".pi{/CP>-\"pU: PB巵 (uwrʋ,P`3g %+<XS!s8o {ܤf[o&THsrq~v f8Ҁy7- dlU!CuH K0Q:I1UΜCÂRYo @r*6(T2uiȠB2{E ~%^/%[#HʽGV|`w_ {=7ˣ"/k'W*1x,`!@3G/N\z`A J!(dXC'7㷳3V_u*k[ #ڣU~/zcޱvAlK5)"zy˙b~W؆KKL,$9sOmP(P!Lw1JF$[[r9^[ Wfae嵉F5j$u|]h7'gHz^^ԛ[ָ,j]G4C}3pkn10[5]!So%!)N$hIC^ɫ\ىr p5^=j6kz;'njZ2ѭ~R驛;x (uw]yxgc?k|Pޠl at4M5l,Z cckn;z`Fs/t?0?4LwL^,āL&nLmg4FS7K~ _ ԧFz0ĈҕT`~6Ed0)Xq`uBY;а'N(+L$J}LuG/j_cAk WDv1 7e?8b|0|xϥqmk(Ck595VWbIRI%P}v'Z)rLBE5fG2iƕ g7 v^2SEpѽvKDOCapn(6ـZ}=!*|?o:Q (;2/̭I\WߋT\R*Mhz%]^D?\Zac X JkiIBP2ezftNS`JC%UU29Ԡ! `jŪ TDU9W\ .Bɖ|EΈ ZUpr{0?.#FPw~)l{MqH^ue aZf3QՎfGZMՄfEcqM5= Y{od-V1|mj}kYp{+|tBRA3d&8ݑTk mqO |܂K)/!95Ū,4[a5?W'n)Ej~qj^*HBHԪ4c^*mCԧ'(/%#d2@Ib5|P捙^KEzOd;2De_enG5V7[3Jݹ;}J'%c}Xg HU0;vQrZ`Xvi}o2•]q-; xyQ;99ä\cVǾ{ 4` VB`7@']#U ^73tnc'gk RcNjz'g`LLʳVn/N& cD2"Aq;75Q)dZ/wN Gm}bJJԍ:s~x|QK=Ý>j-vzcu]7Wr=0A;X\n/n$=exF[Z[VbıGb׫UE֯~UdLpA0&|.|!SS7R딊JIᣴХ,VPy; grS]Z!2-~;SHFg1 [AGZuVz>^a*rH}~eEM7DrcYPb96^,wlk.AjYȶZ"۹7 \Ϡ1val*鮭C1PcKLH2&rGv7mHF[m+]ĈH_zdL‰ pz&Ǔ1U"]x.)uGCb'qwEra-:5s5V`\a\`,W5g6FнLbLWS2!ԍ9՞ɩ\O]8Q,`Sb$R6gqf;-}oӢcHj-DY"B_P3Vx3@HunmǘRX/mS fӆt)u,ukWwDsL|ֺ 7MF6&j$zȇ9 v,pɫkHXu؂-<rY\@&spI0 7c/8̀v 0E!d"480եFvUK碖*VpYi*)SF,]8$ +1B|,.9/Â;׮.W#,ҜR4,RXz11(xO&$Ͼ$P6i©Y+K. &ERR5zF+ek~JtHj2(ejL̛M2:*{ -/z E-{.;ǻ1H/[Z&ZHqD)cg%;Vb/8I ZΤӠ/vC:pJdbl2q)(J_07{^ /:.&0ҫw\j|U*$l4I@SWz AyOyAXTMF;kV&gd9&$YuI,ō$LJZDOq}hhℎ kؚPMV^F\=P7cLPdC?c, ġFLL8 MeQcNe"OSIwmQZ%1&#AVS0V ==z4Ne<zRON~J%L]zhb~6- j,lA|Rl}b[_cʻMYi!@Jfdm(Q2yD cgZ}/+'x?YӹzO~ b)4>Mħa/ꙓu=7b`"*0fq9XpI_ BVIsjH aPeR,Uak%a?Zp 6G+FPQm6i?k?M#ׄկwC P]iV&$ lW|?$ՖU ÕtA!JК`h`%-4.Aj1< E`$ܞ}PjҬ=MU%8a"Uڸ0}) l>d cJ'ކMhO<'0({9njpdDc$*5QlqCر֞N@ U/7@qԆ /5֌4̒C @%QdQ =/U-2&XQhoOmZ׀[Pi_*^9B0-lq)Bݘީ k@)3{GΔ5wMhbZIP  k?2d^yXp #|d;fI@>(-s.Ιye0>wj-6EWUD_EH,nܣk* ot돟kSŘʒ|?M1kN'$="v:{ ſo׋wh Zf]M''\1^FM,܍C!^ŋ _*bu0d;)p,l6Tk?>~X*0N :B i,DAh2B}h\ 3ēf.P[]P .SL6]mw3/kZ8gv<۷g-iTx2O:'T׻f 5%z77)]]o&$|7VJŻ:.b"!w 0v3ůkԦ%=[t#v/fJq`X$MzusR1ݗ\YpߺHe'NjI7 x~8h:ӣB)k4K`b$5T 9,l͠$ sb5ABT`(=Pb3ooC9j>ЖokuW2Dܥ姾<Swׇ؇4jj'fo@N[QvkN _zJIοEzO={|25ad;*QByWw"Q^Ur#4@5?qa(BkUa_gqޚ,3&\"83,Ml՜* ? 
=,lx 2y>U@T+sԝR;+2ұeD C\JqMSdH\KgwǠH.~?AK`~PՉ|יsb 򑶥'GqObm)R{y%.GghQ^Pu2uTsScI$Kۂ?>biy*xd.\+(NFn ͼix]7pg)ZAOΊ{(+]*Y~e»<_XZbeo>QeM(\,egHEJYenP}8[xߴׂs l]iC)W2@܃7 GFB[A570D[r98gmHVsVݞvP0f~NYqt ;b0Ku1> ,%d;66խZS$G7)iRCm#V ?8vt~JJ)R;48= ؞gZ|ƙb޷(k>pTc6 PrlX%Y-Tΐ3|LHw1#ﶾ}s6%qhV.!ܮ(r[*Ea]ZY|ˇ6uXkJ[.mDe|3DZYHQ fH4SWq7p),*Mֽr-w_L~H<Wf6G’#D&RnsRp6RI:xJPۖ5 >nUPPVn0f8)21Ǻl"8vlwFRZo8bƠI2#Om=fp+^? E g2%Lx0Ѝ0a8yALZ\P ʺv0塌IbPc!\F؜%X`q2ܶ0%7鏧>t,Ui1o]E,UtP C,~y'oy 轨TE.+ɴ~-kh?ڪ@|?Y my0)D=6ԗMK/ ccT2< )Y}X!mt1/[[hCpV)2mq++6nTPҙwނGjՍb"P{CkiwI6u"e϶ڛŋ*Wr@C˾|M mo9L1OU5''|).4/PEq[XRlyM^)JCEYqB* 6ݶkx HS]^@5l* ڮ g*O[1yjGĞ[Q .ͤ}d(%;a_ې;Ju>tN̐T V\PᮮpYmUktA6CTt~:܋rLYuNs_M<$skXkiLv\ԮnI#9(VRRM DfP8Ңeug),| z㻎dkgTWK̄Y1%Mc`^@eW2q'2 .xeu!l5OX A!.L cMaSuw6i;(jNOvIS$\RiBM^ٌ!TGS4 '8$ÍܼzJ\O]snbRpE Tn0T;'Zn2<v\OjmIܦwξٜfգN#^ S4r7<=YaT2-ՃamNkj{jffvហxʄ%v..0AlR;-z _.M$$A- }E[uvH`dT xq$ %}˫gSHz/)tGEIKksЊ1"B%y!BGYv=Ő%@2@{Zd:!K[BOq흆2a)J:'[)yaήgۨ;H=f uD`J;b2&BvNa;6\)+-]T+0MH2-#5xfu<2lZ1/`VRl, Y9){"R$RNH)GJq .t-yh\堸'YbJɿ2@#ԓd-@nΣc9}MEv-Kj)JFHpa 71>!@4Uܡ$EWR^im'IA\ Vޢ>]LS,#)JIQۡQƾ7DJ8e{yly%/'wЙ sn95yiGм=YZ9ҜA6ݽ"\0݆/j?bvCmI)O\^b'6E4 a<3!.թ]CoN-u 8lX aIіqD֢օch R !KC cZɯk_w ;-}mmsb9. Jo#G`K'K+Ya>=>fW3&1F&$ĒGhx3躌dtܙظB|4&*4OXFcdGI)c(WdbJ͍cZ&LfÒiLW=,XcZԹb,* %QeQL#&cuv"-K9W;YAUj 8V]YG ~ܺ/sl"7e%bm71d.cf.H-D k/눗nnGܱ-J̷;CuH Qtv} D`_}]XEZ؅'iߧ{E0%rnUerUv694?l[PYM%a:IwtC-wo9lL?0#UeFsg6ÓjZF Ò&RzQn6dxzgAcQu3&W".ռzkVZaki|98{fAXص;m(x*XYW'l0\eU=u,-51Ҏ\k] P<(ĕ@!N&s}tu+%P N/8o}V*]Z;rc8Q7sE轭2 xxm5yē:6_Œs:au:0qapgz1.Ի9!|OZ2 Qvgp TOpW7|'x5cy%?pd=na$WPhpk]Bk<+6G-T~8k4y= 4,5S31~y?}?LwAc h'T=&9{qzq ݞz_w2z% '_@tyD9qOB"_zAȹ4co^'!8[7sr^~2;Ѽxu f^4òAFj)Nޭ36Nu5(*{<esWGOMW@{w ]Onèb 9c}}{9]37f{3Cm>PY᫻?}|0^k?FūpP6> ?!6ݟ#-גu/]_YA$ڱE,e:.G{觮Q)B1:R55=!NR>~ }dB6:3AN?\'jE >o7_. PN,?ʨ0$WgY8 ,7Ҫܪ^\йi hu 7}zw{.ԅP̍ S僂B3u#+Q)Am\0juDzY Ï4E%r0ƭMMQM ue %| &O)V i+6(u=Kr_X c@wpaR0ћk\ZY ncQ([ǭEZ[ƚ``=e`u%!49Kxl=׬{JsX=nIp{`l{& h֪U}akUVEkUnU@2US {0+؈0jA%mk$mӺf4>X͊uV.,Ux)h+OPf8)rDZOcg>Ȋ4tC_FЙzչ[;O=5^Ku0cPIV23aDtb tDwFx6:UOq, vW %/{L{Ϻ :SoY-u!h^ H&,4#-(ֿq /൑q,Œ}u@[?cYv@[ZҢȗK’Md`R-9iT5ω H,3gU懅`4F"at ug.o1<7 ߿orNg_IX _i#:Ҵs?iӑ>lEz^Z.</ovGHV ѓ O!I=GOlP"ua8 RC6Hc-q0 e'V{7^5gɾ u;"ۡj9̹9L{:hލg/WaT.P rD~?@0P~6Lm cx+xxnQgEˢYZ*|O`. ּP]%_4qwxpb ν/9L{|oiM6&ioBWoXfzisVWnfpOgjtxwn y{/ϟ4VZMk5nq/筎"Xny: ynfj&`E>bhҢ[O%zu}uY ZΒ談|q;[;.kWT[c>]BYS%c d.&\RtdRĊ7\"/W/Dҟ)ur7UNO BlW~^^ d 3C.1k(윫6x *Ěk-s>Yy Қ:ګΚs#_ӫRxVǰ+Eo坚}@Oσ!Dm} 5}zD[K#my49/Sy`ohPXGB*_R"sIҥ._Z!KfB&SWbvV$IBLULNR.pthW ohxi Mo%z Ǝ! 
51yli_^^~^~^~^~uYW7I~PS @>IHEkщLbcL5k&iԛOF|ht^$⥘9R y]>T ~t Y~9Lr`ѩ'jѤ*qL`zI12k'36^ me{w]><% E BtjBU=ʣRI]|_WX1FcD U#Hs:uC˰EȦhU*uu&!/JtJ>^bA-6\ѲQSAW $v0JY8Q6O7{bSu`bZ+8bm!Rt-KZҭg)JI1AI`":Q C:l쒸SXYiP)99F wQ!Wc+XUvysH8P[e] ʺu-(փ!M c&Pf<嚕xcuI<&JuYFmXMpɟfCщ*Eֆb*"iMK|{ UNصk9arn='3'fQSCeqPSVb$6NPE#5/nY⒳5vZlfHojRQPN Y>{O7]\uJ(aK8&F#8^UZqxQ̯բ:jP4'(֠@b$d%vk>,7Y\GN^wIᛥȋBge8.O]9^( ػ߿ү^܂lx9B/Wo:km`~ߖ//%spbķQ֌*]#翗H/49?{>۪Ecfu9Pؖ%A"ޭ r\}k3V);ף MyϗG ݾj%1'[pMmc|:{~;*$/NrOʷͦxSB;xwM+Vt(&VI#GǦCzᓵ7spC?xt󰟌C͜(9F}rld4rrl=TQ9c#`lf%Y-MH1Z=W6!TRyjGCɴ=U-e%%'kvRQyM*=k/|e; %3Fԟ;(r}=w'8K Rq)PH+ AD+Źhd䦪"rrn13O`ЪHbg=%5U@i]nhH8 ,Rj:tH8O+T#xΆM> +_X(e,ԸE3;z`Te Sv'ɨ)`F`ZAމ+-P6qJ.1-NM:΢`@]=~p7 a<۹@u3wQ1vh{H]x EJ%;*Xh0Զu9rb/=6X Q"CBFw(4˹b8֖diˠҖ(+@Q|SFTsM#@8U帍tTt6c ~}ќgi(ڲph>[{PlN@l-3%F&Dt"D<5A"䨜Sis3UNml - q2 ^%pnDgv^#α7V3ԁ/X'(VæR]Y`ޭW4k l7nĜ^J(gk$'6g1fеQc<iK'6M 6nz޾ꁳnKv~6UIK Iܾ kIO]3H77o# 'q@/)55FB=؇rOK "8H?VO.KGAz+zcӊz̠M|~}_G+PlJΡ4:ABç#f…Bde(gZF.+SDdL$DmV¼f se[l ЮRj!Up 3`.Drm&Ǟȑ"<^ h&I rŤ.2MpRzndM$IeafhX\m_thj(鷰HX %AAr&j8 JqEJSmחхk m)Zg7 d?(qp5:f%޹y8]")i69% L{ݑovq52o(eN^1B<1Ym;M uaA\-K>{$)3QgU{Scv?\XlhI% xj^5^K9ܔŘvڷ+dk[%87ޞ7W-}wu0ݦgD4Me` uoN`|su[Yy'={|SX'`ɺwQ*5Ƚ$[{讘2AG9C.`Avs-Z31DZȭt=s3cjAY@u@NJ kqf=NQIQÁug+c溻h>~k_vPT' ."uX9Βe<-^|}}!w"ɣ(~n߬vo>tSn)[?o?U{q9X™mv0U" \1xLyKRDKo!MO^kϥ5Oi>07+MܿtYz 5vp _}J ?Es`da>8VF)Ix(7 yGlT:ZDb=g,\{ˡ;Q\q0rJq98\[D iVbrK>Pa zX)r:A 3|sv=9G0gm;' iVxM2v`<Ǘ kFo%uէ,IlI-YU,#(A"&@y^+@b.{}Vi;!؏% NÌ/!uYa= ?GK]_$9'8aC3׷8"m$uN'ƔiZ>Ȋj$ wӺ?s' *@ 2Ժ4B&\pxR5LvϝLjKIcho86f3PoUU41's{2d*Qx>2-zyU!mY8=Ϋ4AckdQF fb>Пh ИֲEzobBWK&%v8x7FBP"ҁ1#eľGI"w;+U<M"|UhjGWMjax 1'5#/N{qyڧ7 ~QT㧭-O%嗦G>2Key## *.X<8a-P+BMSȌ~yոi7wt[T|!Dxg2(|/g|sQ]oST4uܑe2K6-$fY+ǢK|p:bmWpRŸ^.׭'2y4c6374+\&"%Y3;Kr+mhKr*!ȋB+"0o8 b/Xºp'cY)/L}Uh6h?t5ƍQ>/B&H6)!N"Op*qs aQȍb4qLv`(v}Xh;dﭜbF8NwaXX7„'FgvqGJytB?.>hE^,\T;W˥`YI!IZ?W/~R"d#}S.je硗˗ U)ȥ=Nnx+?T2a~`GPGv"]>t%Po %B%Ȋ}YtH掫Ǚ\tDT + ȭȊ{w. /{:~߀T>.l_jdpBMシ8𻡛B:^ğrHT֕ M+;5[q,wZf%1ϑIY2Ic%=Bdc;aMwY3-qC2*d4gU& OH N}c_D.E kgn%|13%G6xz wR_a˵A-MswN u}L1g|zuR}'Piuc uoDI̝Q/{*,͟(ΰm5aMT [fρm}%cQ>aihPQ;8w6 >slCWh9fQ8ZKh!Y8TgKO!EҢHV)ErJv5V^vf9J_"X1yo0`O UO1W6i|p.)IYE\yK}mҀ>gQCH.#zF2S3q?۳g8ţA˸1,V4y#&[Yb|X>+Q:9fZ/I44M|:19~4r|lH||}U?T/?C=0O4#Qrk&8]_W֠ݦeR՝19S<UM+fށ(Wh l/>(#>3› /m|%Kqz*\6R3"x%>&bD0QB$KVnzEa_.V]]ĭ7kD;w601 3af0 @ Â( 1 182{ޖfF C2WpXpmfFmrU`q BDFHF0^HJAak3!ГJM_{Riz`Lכ}9AMkXIG>2Key#0"b$AER(Ibkqqv"Ԯ['l'|ϳ-?χ:#)iGUJ%ۥ+Y#S%ֺz1_H.\yWaw܎x UzlK %zo?—$,C`Zv֕AQCL`# x,v[ .ݲ%ЋFvYO3}3oX&J7x$b>a񫁗} ,YZzүD>IBYD*4'IXDaqdH\M"cơL>sl|4HD|4UEwn>1ޔPjKQv h SN8 XBfZcjye2v\xٔXDîQ礼\N&Z: s[PrEť0~1#ɅVeULzV]!@k!\ z.c]vh Qki7q\B @L +u$.A,%5fѷuTq>@(H3N]I{9R.3LH"?).}p tƒBSKϭm 2Nd5IR1'5di3J1XJaisB8f -4O$V&a&w❸f'1؉=}oab5s't}31ތٮ^ϾA~Duo(? 
'4{^OPթ 0d_g^{?Gj)t5B{nܑ[r[w^{O UvXAF fosˈRTf-]+SbnxC]uKɰ Y\6hQ'e,JrB4/{u8!sKP$~(3WF()JY"O}eR* LrȝIeMM&QN`Ğ{^=3$;S 'g=w: ƥGp36*#'-N8QKu>IN`~;_K=vP \Ơn譏C*.GXEbTJe57c{p*Kȹn "~(7'_b3}u}+P|xӬ83n>'uʖdN6$|^ҏ73~'+Ldc>J$qXs}l$mo]޷#{={&IOwjyZt܅lѶW.8OӮ0Isp[aB7RIV@b7 BA{ (EQޅlD2l>) ^GO|znSdcslFMmNnu>n'c;Q*BtG[gZ^Tiq0aCSZcGtjy0B0&m7ł{CcGNr!5" lŦ{31ZXOd yiR-`\[*wE,ĭ6 j!Ut'Mh1&2ah(EJjNhÈ1mBxnRWyEdJs妵]y7iM=!fke=܍la-4ӛŅN`ʝ=gv XRRUj5>Mb3}o`.${0~h1cd9eDv x9F7Qj 潙LXgnOMS M aNsuD$iTy;X BpW`0K;ǝx` QR*M#+b5_;\69 aA=q^ q AH-vb=~ ٻ6dW݌>- Zc'?ռi(b{fH:rD3_UWWUWWiְ?o硕|ܜ}~7<ϺT2v70"Lv]{>, = ,N }< \4k@$CuD޽D$OWtJI/*"2=iL{e}G~5 )A=hLMYFZ f0͎([|srD9;C\+Vxc2MajW}bn7V'[zb XRpۜ76-"y]u, ;'R0"T] afT*@;8cM+7ŕ2 /6|50Ɨ9.+8"r{) *A6Ȓ(DʀE>Y1ϰ%0#|5y}*@Ԋ;";MUw$ uw仫/R %IN 5oԜ(AJg3Q?G^$>D`R6"B)˫-XD^mεKk6F\7MUS80iRM8ܺ!r~_7:Ja M'Ap -Xc.Ʊ>ϱ ;` }ܐT9_MiOaglZTR9k-8e䤥q}z Tpudt;t3*EtBaÂ3y^5.)ړֵ/ ?[e5rkzU>4D ܺ]U[v~X`pL;gR0UH+X)'Tpޅإ0Z!{:iZɨA$VA{KZh*B\F֯߱а[-FyFځ,;+E0-;MSc 6)r8UMi_N(') zu_>5ì&>~IG n=b!c>|է#ghFap0\ +ϕ쿇ew?X1ͣu$/iqJVu+{\Уzd4^f3ΓC)p@]9LKI$ ~dR+8 4(.s1981̰B}ux Jqi` YD~~5U$]qՒ7?yᷳA09d VFE=bL+EPPA(J!#VI|'jj+ oWɾLpR3F-ɝ_JVW3~ɷf~tZM9.-j61Oӹ_ޙ`;٧[+x T1S Pe XIcPB Ww)򺋞vKw`aSd;Y)BGwW̘x]ЇEd\Hrz)[]8% X'O%=<МvZtͶDpD&RXIx`4Aꪳ 7W}5tUO>,\mK_)m8ڹ8t2dϋ;`Vx#w0OaIӻ4pe\MPGU TaIè;a c̜0hԕ 2y9CKZWOtc*]6m9S3Pzyý|d߬a̤e&;;-N,w> y&J6V`iM O?n&=dN5YYQ7̓ԳhCbdrW q5iMj]epU ~AI@9`=o0?L<|7g3֍_Is`}K.E39aIz8&p=]I{O|zyET,2]⃕=BF{᷵Z̫h3וYtZX(ݳ im6wfn&Nk89ղ6]n,M5glA"ZB_?ITl [b,:z;Oet<]<U8WZ:<iaxX1>M$nD _TVO˴l~/4 Zy.Z+!RkkFv%%:^:vZ*~* T $;I8B (׸b v&Zi)bEVxi2k'o5Qz瑍H!d[`!2j5A:)Ø.^h5WHFͤv\qMot!lNEov2c*,Dp\EGcA¿ !4Bk d*"V `@+~8B(HP 84j%w`z>H,|s?]~C|x`䟰u7:zD's0.)OֿsmƓ+~nۻ[DL f=SvV+Ό陰9,bDJvI 6O V̥OP.a)cit-9WK5~4~Bų+܆p`S-:ǟ*V;Z,fG2@'45 8%2pE ~<#pp $.mt9J$,m `R˝Wbˇƈy/# brR owVɋρ+pPxdC&"B">NHpˬ6y î:)f]@3HXLRX"ưq(a{ !b!VB,9`:*w<`xj缠4XI௃a0Ķ #%Bty΢؛~SDk O]P|~"kze#X.YuW˨.z̠*$zK~{;Mx!ׁI1;t6zH.nP M$V"i/QMV.>k݄O#vd| ʦǽsi޶26V㎷IͦTInq ѹI\kST:X *l*7*~;1ʚ 2N6:֒!H: Fe!ix5EOE˱Dz-"xj$Ei0G*GF@ TF0˞Mkqo,v N1~ 3ދ!ӡ=Z7RMv7kZGwPo0ZZ7?Ou$7+[T]Tt?\ބǮp%A4^c)\¸|Vh`>i+#Ż;,~2Hc3V䎗z1C 1*O#%x.aüF9yQ!u)|v$J{䜈dM|%'IZ; %?i,){rf{;J5;4]Q>-D 僈tEU,=x lړN.<</ +/zQ/3^٦J6Űb6+OB$g%Nk*-bBXfK:JQ[^=Ű%<yE)`ɶJi)oTqN2bn-W"ímrD8m("Tb, ӦĔ(Do'*7sYpcB3mJ^k(%11VP ޡ8(`(N4 9x=whVZŭ)ITCOK%#H(BebjZJo VLr _5j5ďdiHb~I@EMɇ*aeDs;o$s`)ªyGB;J&QstK(Q8X`1Ȝ4Xp@=eH% \ t¸PĻ֕E}j8,n9NB]BQc@P#JH).\|vv> HsKQ1j;X-"Pv 2&~GoYpe f y=Ü \ZnѠnn^qq5]2i`|_dH, xԵQ U~LmT.a+涤ؖȄN薌=тvIt~@mwʺ$nzsF2)¸%QpwGt?Mtq!%V4Ow>=w Q%dN5i~JeUH_hnVtgM9!yno{=[lxXP͏VMJGǻI^>82ʅuipJm ,H)h!BD8(.~Rӷ!6 omy0N3-5ŶuZt@L-m2mB) ڣHC澷Qc閌byE ]hAӅ2HIx{%E彫c1b[[T1oyȿ:~c^.t+Bӽn| zm2On ]nb/Cȕ̯DĿG0IQG!yޮ] 5Xi].%*,;zioe=Տ_ܣN3~:u]oay{B;ShU~CagķP9E),(օ֠Q,-v}P+U8b޿*jYVB(1>JMg ư;`VbWfʙ],H1@ro ޡn H6u !sܰ ף@ ˄°V?%&,vBYb떚)}*5~j"Ⱎh *C\ HD'7I\N8Z|ImnbW571/LǷ7ҴDdbp.+/^17S^U;'h(%״g.:~|Q4N;r/{Z\q&=FaSﳪi$%3R/voʝqJӌt׈z._}9hVb\ J-jOv޶Hap;8ѹSOwn1N5BTTodBǍ:p}-&M0q8*ڱ9=Vo$nOk;ZT׎G;n ]O5 }pi'k릓"qBjԼ;{uC B {E{i&v܄ÎpqS=XχB*FQ`)JsskZ@_C~Y |ZR%eٶ/j@4 lvњVN*9Jشn u{xLFܡ @d; *gUI7UljPIY[.CYu`9Łu6Pʎw.K:ͬ +fTFi$'w4jCA%&` ,/r޵~Xi"> rez8܎n19T&CڂeoKwƏgrdXwfHAe`\lIhIhbzEXVӬ`Yn;)4*Lh)(`v͓_|[1 ^ I8Ҋ~􃱄\7A$Ex H+,Z]~Ox)G81Ue93;-dV"+I-膬~2ƺK/irh{0[8Ė"pDT{HP'w3Xuuʪuj#ZؙqzX8Ҽ\ ,[בX-ާ1coPtn2~\D3/QE,)NHQ?~hP26eL<8m`b1@@dK *ڤBpGKgu>'a֜cCbb+ʵ.bRKRh$Ȕ=U]!"i$;pp p%NJ^nX6PHR6| N1-)fQ f:|sLOp cj-+o )N8|>s]mϊU O'?GQp Nԁ4)^ Ae>V}0i5ٴ`FTnڥ*P{J7SUݮyќB2U5Y2R ᥆v a+ePo˽aPdeшipߢ/ACZ&& !z2͛(axCǩq JFY6SI*jD'0yP؛ڰ mC Ae0ɳ ~8ΌӰ9aKv9{wԈ{omQ@ 91ȡ~2K\rBa&`SUhؾھP9,ga@ P o% o İ`^iIW%Q[N;] w'Id~LKT,(p6V"Wщhe':=<xW1ҥ;8O5U NYD:3S&OyIq;kL6qPHo"o &a+ #(tm6̾6 5b9"BP ȴERpapYԴWivR-,=l`Z. 
DH\/KKPzTrSu0K~GN,K倈 |wAK+Y[-7]PZ DI +i]6a,帶 ԫi4x]zT^I Ȳ:h<-БXǢu~ Sϫ MR݄ MRTT7x0B;Ȝ-Vei5 J0ZcexIJ) / BD~ ev5z?z;:L06t5٦@r'3y3_9Jشn! w`OMn 'n 14 adVɚ7!Y&$kބd͛jf&`mV@:/_D[b1Sd@U9m6&mˬ4Rby޵q$B 'r/ a!k$a'9/j3(-)9SMRԐ=!)( XnS] s(rC/ 2#8D  +>:³?w|;#8D ֌kݵ) 0B|xv`bk}\-jwկ>ה+~TGn `i%jORh%̡K{4AzD3#+Mw mak$188 Il &BӅF>E%PO;~7P1,đ-|x nj!9yf R ZrNc N0M12 VD JˁhGxA .6N <p;T%n*9fr4!5@@☷3<(h v*HCt:B _$N6 7^` AM0+p(Di4BpE@rY)m[jsLT-1DF1ae4xF5`pr] EmRȽ U &pb[DrLAk-ƯqBDP8qO1l0n%$X8^RJ@ӂRZVEzh9E"0`/2\hk0cQ053X h.h2-8P~1Y u$E0 \ 1QJ$9C3ŕs*9_st+]CA A"m'NJwゅ$e!8A$&cX{ur9R!bJXʡ!4V;;p)c &1y#(I\k0֊!!((݈AUDP`9RG9&.)q2`\a9ofBqwӱc 5Gus+@{ C>{IUxx)N"9P*tR^%qƆP$P0ER±~u?1O<& 4L?.F<͐SW.I.=2`s_: gdPT?zqdzɇs+mOk3A}vLJƧ֬mF,iQ1 Zl9}8M(0f%cU+>jV礴DnHoI`*wI5r2q! 8IQ/0*^㈔qY 7f%ur%"Hp:e|~#q팴rqIY!Ӱ9=XG )#ezw&Fhb zJq〗5%Hz±fRN\gmV&]2yT|ۦa1JJ8ϐRjL6S-o;IQ&7U8' (op60$l:Ycn o7UJmĵqFV|%)ޞM.t.=~ڂE9HE{s_+Qp}>a]4uҰiXմOAKal% "g [%;1u2;EEi/ߛs7+(8ro3PWznTU)NjsdSVY&8VX)+ga%ZD3VZQIgL9Fyl潷 }`)]U2kv;P{ͮu{g5,]Tk%C: ']~jxu~1nn l&;ʼqm^ra߸_a-8f.;r ]o_s\_5X[?7PU.ͼ?6o߾ϣ7[}??FLaq4KYzVŇ8R:<[cEÝ-wu)/>̻ގ[?k1I]6rL?gaݻAD+@v k1Vhbme-&Be &6VT[вT{+ohVʭ? tP:iNᑭ$;jpBjZR!leY(Der!e?Ma2(U a^l(<ݙ`;G5%8bp!<ݠ3LzFqzGm5v`\ uB FMͦJwW9Q'&%ӌdmK5A#B:eݕlStLsͳ:'[*/ϗ>y*XVTP\]_~صJDɶ+kw% & X4uwW 1~ u="ˇO(_,;:+\Uz¤۳&ox-xOyѽGlyen>.bytL Do32#^vcc\0kqPIW1 ZY.H5I(HL4&a^ 5 \^scf}}$a&ƄWky3|tZ~-ZnFQAzMޕd2CQQlG3?E1Bx%(,5ѽejcY=@PP+ ˫5Y患Xj !Q~[M]#!{+aSHpJz)8N[:S:RY )$_N[պT53p.u| ߧRwd iD{,  !$ǧKj ѰV>8! XB]#)&ĕAxG4AOx曎4M5`z ƣ)p tY#V)46ɄSGd:FK{DHJo )sQX ktAh?]n*eP1E,T F ~HRDcac6d8Swr)ƈ\;Nwp8QB@DRtgRvIȁw=x9 z $|ȌppzR sr]$["Xe|`)n;]uLGKeijc'`(&)P)68+D) âܩ(<6Z!`S/`Ԇc)$Vl+"zqx͔34TD"aNʄ*L0OQ v Y$3F!9ks $=12.wT Ӿu0Hr{f_O`)E%ὝɷEv`،C?ٷ+.,I.= يLl:"ut7wo#GpO?"kۯgLz4\ lj"8 ˧7oĘ &3v66,J r-i% x?>0WX|vZt)1p6ܻ/\[7NeǥF2ٍTJъ_ߚNbuCŁp_/XPԄk1'Q7 L`@]R^ @:cC M `ռbڌ麛k $ lJ[42z1m]) ӞK l^[^}mMǨY$aIY%aT_\rMhFvA!=]>FX9&];@ ٣2/=?JxuwTaRx G>k<9C(0t4E0tvfhԫYC[Ԛ1ʎw0Nz cAGNeu EV^I6!EW#V0.)X2xֲ^r&EXpF:QQ/u ֋"}j~- } @\|b%v? ~lp{3n/v%i<흚 ߖC oǾ8僱%rDϪa_INc5Z? eD;[f㚌UJss{9VFO\Oe8_,c5ʰN'o\9/񊝡,}(O t>^MaUU$* Tt %꽞;zf} !2B\YOTFsoP)w;NƫjUN!$PQ([k#7P=E/.Y)'PW=KVS]'} l+2U?N]z!**ُ0%4%VTkU[6a$|vA+ +/2qIx}$kW>kI] VI4ܝEg#JTIbbW$ xnlxoUn 71NP^9Rgúyn8+ԇSsЪy\3s&(qҜ2wsbsX'/y U;J}UMc"?-ct^QӘINs69+?NiAQ5|@I&=RiX}`WBfCE\!Z -+Zx-~|F/Xb{?qqb{ oݲ#NC %(T`]10hQCJW: ^j =Bo.4΁ck9&I0Y a/XĬp!,bMU{gd?\rl1E5VKdf@(`A {Sk'{o)ѓ^+bmwK\DiWkށҳ,JGC,/a_S,e.KpYu O0ᴀ" P+ P{'.MK^UC `Ri;l짱n nPoHb/nge[|_1kWpq=ڃ=15sٖNEe Әf401Yq__9iQȌ"!@fFlgIyn! -c`hs|8ǃqQ_L`)F*>4;TG4\|8-l iRip@It aHq:ΓhNL$'`9&T̆1 9Sʉ ;U'TH@*BzMBJhm0LP`s5,bi'KQ.02JkgP^x[c\9PX@AșzƕmښoW% Ov ϋRL5TkSG@,!J &Dl%k^O" \CR> Dj"zi.j.5[O{_kuux%Siݺ6!e!)) Ř5 cU&5엢א\XQĭ9f! Aۑzc|il:Lu+x4o&|q3㯊7fVCh^lᬀV uLs:)s5c@Ҭ(>ܚ[GF*^橆^fΆ0cFKЎa/ BR  (Pm٦J%fһܠ`sKkc L `[ Chk /jL†d1f![L8 6G^{ѥh?:WPRɖ1 DN +(ʄm㖌4!Ţkӹ-(őHl?K9<evNuT ZLm]RdZ@td htMB!DVy囅>^f ϒ"z[X>#2I.#z"ˬ"U`,H j%!G"j7É :Z!s/2-ry[3lBY@@IdQ$%wtVg$ǁB7!>> (Fj Yf{DT!m KܑlK-\ 1fj삵_ yd3n8(I^43Fvϲ\&I6}+|!rw>35D$}f0fiԯcA4VLSw'<gu_]J `ftIՄ>SO]P:uA[3l).[PDolA!((P ZB  - \ڲbV&UY1l\iXp$Ls* J!SgtNSVyFd/-:bE&,LOP!<U ōUWHJ)'xUtrcȣK dHeQӸ/ 텝#"}仐WwqiV4}g7w5}Gfq^ u*\[6Hyn¼H#%MkfY{Z%h''aw>3IHΊl獍L%zC)2$I 3BSu4TVxn,_*hoMEr)MLs"x٩!5~XyEp0Xe!Tr 2쾃㥂+69{9kM »@丑VP0pAc67d+ۙݜ該 GQ3A'tVPF)egұ[Z4`A% b,4Q8W-Tn?yڧĿIKu=kQ#枵N9 Uˉtl76o|BuVt<+hQd]])n6|$߁8X<3c}B Ѻ!%M']Bqi=a=[@Bq^V*Qrjʾ}Xk2{tۡ`vȬ>׽GHvr33 9ie5$tjHs3ϞHm7KS>n  4XͽW{}7pwt<Ś|oC(a+WqwR00D1S(da!a.<q7.|<*}Zÿ|(,]"eXlkH<%Vc 9dc-&N/ex$ J1 xc#\%~8ί ?`8|0ȑ8 =* 1B_<=iB*ûp;m=vI- @, X+P:H>?M{jA d9l9jo%Bl}}VgOm=gvЊ 5%BqU :/%T%W#51ΫѢpQ߂ZBrQ9gxueGwj ,(Ahu؄VRk!ڙBO"]Cگ/E ӉXjkpZM!.70^|hm9h)hh㏅vA-Rc@X/\#p 厔ɰĄkdzDpf S ^:)vi3|OK߁? 
>feejHUswZ.nH۰-د,c׮2z0?~ h[u0@R62ln֫"0'17 AƇO fa+dt6%m !#;z3+~>RE̖1^8ߍBSU@B6&;;NEC1h(‴+jUK:Z83lWt{̖1^ ~9%ƊnOm5eQe)Av7>bRHW  X劎:dnp&w*Z5+@A}Y o7kP9`M%JuͮC_QigKb*ʅ:6(}9gHܚ&JdcpC͈CAǬ,g\L8}hǬ?w@AzmqKuds}zbxR!o 8D/֤@l<}7}m?.S3fGN~B * ܗm~:sW5QoǓJwќ' Hsb~Ť[1A~ҝY卸~po>HNNuߒOD[gRsOd H-@ds}%2^ KH}x}`)+jLb6}"} 6VΦ?'i)+"D(EfN9H^RM@J\""* K $@K鐹J5*9 yؒ;#,XDbqxP"&"@0@~%f7F5%`Uf%Ήd(37aE5|9Rdv֭,re}=Z K abtv[h3lz#Q/o$j#F$XX|ckW9ҽIog(So[P|tJE=+7'޵m,"s9Dz0`iQ4$mQ4TJrڜ̒LSMiRǢ wgFe:֑q{9y?ݵy^W@9As6Iֲl<,=1-hy@Zl-n:n>)o,oMΥsw$L)A{ G4hǚpM(Om<ש¬!ϻen<>s~Te/m3ҍrK8iPSp0j N{WN-8ڝ"kl[- /TuE fDdԌBԷ1kj5#qa )'N92Ϭ}E[͘2#PQA,Dq Y(AyRJ^#=|G4SFRjBZ >w*Ì܁I<8,5j: Xp@NKA,̩úq-uL$놰 QF0qHq`U bJ\)#3^BbtfBr}^i˫p=T] h{+ I%>ARX^} C[uD\b ZXP`FRL&8%QSHf_@#EkAfҖ ޟ{j6FȵvZ!=g8j *z 6[:A:p"6`Ac'Qg iB[=h=iJ$)׀^efr +S%L[4.ϴݢu/Q_\t5Z#&/n1*%YݛuG]OΏ"p1BzI}tt6_˛p0zr;#R 0Ctwwsu _.(;BDzTqMznF#+ k*M*'щV )*?7K_Vއ:ZMj( :nC"G2V?,qU*8=β{'.8BlrNĘQ@nexݴ 5%Wo/wU:#| F-UGcQ9 >mg^e)Mn mFK 403mʽ3('QYq7,yu"  yX*.^~:5 ޏk͵ "?鎇 #xT7' &u/ߚ懒qۀRhGB]R&S,t){ӯTxﶨWwnQrsa/u{k ݟ-]٢JwgjleWdX}U%PTs:W֝V?CX{gu?6[X'>pW,[}\3B=pz/nxVowY86K)Xl?Q]?@m?C8崓:fol*_k!]x}f=x]Św^Jwj`!Qv~Ύ jN%:AAAEɰT2* J%î%ÊG\*a}@XAV9&sV[M.Ȉ"ڰR*ub*e5WQN)`F&- zBbrmR ZQmf4OÓyg2t܇Je*](а!fHSĊ+ă#QZI%"DQpxT{kwl:[ݷ>V슔J1d3!S^w:pZA!~̀wGX+ xMK(;ϴ&K2rÚ>௑)q@It%9A`%(i:eQ%9b)&OO|1bND7əڣAnr S"Bו݄łJt*m]|i" 3Ʒ!+Y;84O\ i^*vH?h,ma͹O%bُ*KJF*7ΣVkR^LMV?[ 5g9"S+Vծ*.>,9ۻrxA3s-[9z nEJh) %j=VWzj#l{wUoO[N_OJej׺^yjBڭPI=afv2m?mi]uxgiK7t:Ce%)f+ʘ[|@x4]LSt_%:H+tQ4Z\#/nͯ{9"&97$\2} G|Zi*=;[}L췳;xIkwՃ@]A#j ad/Nj {=<`>\i58=I ]/;#zB2},qѫS;:*̀ET`x3 `w'7p̰7wF'0&sz{iWya| \j5g+N6=7_ٯrueNϏ']MZ ,Eևn<إVq@ Qo7caAh8xO,e/p64`x"1a!B5œQAiח IL&Pj.޷QLLdZ BYnwkߖsS掬d;Xss/Ղ|jSkf;~6}?;z73I o&wR8 2`zvr<N>y| o._xv7߼z8 ͧ7!Kٳ`xQ/*W?-n|ճ?>Jͬoo7iӋ`~݋,Grbhi׳ srO5˃>~*f榃|vh1 g%տ.'}e^| pUp7CX>,Gf(P!w):!"3} S\H[ݷ= [i,jg'GKZ!fWl* @]ۛ!)4fSS,pv \YfGP~G^t4gCAEJw!-L6b7az=eOrÇwicZx8pϾߍ0f`j0xg+]ta&ߊ>=>ot}9_ F7\N=|t7gɳ0N~xZ  @ID5@Ä- YUWUVq= 5,2K=Ɂ!jѴPsh!33j )U[/~t\>6)f\M+LkY+:cŋń.52u~,]m$FBuj$\ BTfǴ?.:[ n[K~-lhBM0AH~iL)H}FQQhgǡQDfmG7 bՇV7KAѠ\Qk!̇M'MXJ& Mк"䁉NOo?*cc(}o |sM_ܯ߾?3L`N*JceUcߊH\'1i 5ip=T ߎ6^_] PXbdl拇A`K O[ڑA-Ъ +7h'C/)()L?%51*ԇmL0s OXlaыE(nVSFTbD ola-V $Qv71?fj8b"WپH*A 9* }ЖkH+$Q 'H[ RzXȉܣ*8r Ŝ7t8&3L.f{|v滟ޟ=@0q X6Ok" 1b#  Φ)a_u~oe܅|$[4`?ǎĴ>l!uc(sʺM(,:D*zz׶Cj[A_Br X0e1yH@ՌY(S^0B 6a|hᩫ\l pYZo,Ja4ȧ#0z5=OqcytƤ䙕De{e%4X_)' ʛF9@ W$2h R2 ^+8hYs%RAeVI ?&dvzk]01PȖd}~DE!OWJ޺1!6iVZٳ~?v !i 1g2 [QX?aXn[Nfi6k扩y;5+`Hr\a5nz> &>,7N{>̠Yǚ2D1B\hZ|OR i2Wd9)IE&9!x.5׸#n_U rK/+ lB8\ '_2?!<;eUt(b+-VưAou*fapd>u?d2&v\'O&Cnk[ 4IڙnG15ek~mL)gOϔtLӫ>龉 9уu+r8u["U2ƗR bmAhhq[JWfJM*mZkR2 )y{ 9HN<;uWQW" 6@(E7yk;!2kƧTv+]kgM3-ATnQU6I9p:[;ȴ.ic+H@l)̖u+6-oz>Z] nu-di N6h*Ecy?ݍ+g+"BeחǦoP1Sul*2m٣f0ӔBapys0o]n8Sy?}7׷/}|>}R}} oT}Hd}|>s >|[3Y1Q]^43ӏM7!Nb_̻鏯OgI聝|rR_clOH:2\ż􃹜_>߿{0(Iq껟޾AK<.߾)vr'0)0\ap; ?b6߄ghzW1_%}S|">|{ ]_?9It9hg+*d >0G<(*cLgܸygeƠ Up R4nT̯ bXy+<ĜaGyrVTx/Cȭ[aXjθл6OoL"]]!~+䖹\yfȌg3k,cLr`[^E>0ǂuDeIV1%j5'RGIl)= ׇ.JQ{wO>o&!iZ."\lKxЪ/o1&SvD>ѳ=v;fkOոj:h_ Vy2~S=v$fkK'.!lsZq'N0VNA8crN&Ţ[n%l 89I*"&L" (5uiZqEM__ٽFzmvDa~Am: x[ n* xq%/U+fT,ml_$zuRNeKg>g~G'i@2 pt2N((QUfHS+PB<8k%9'ȱ Q ;%\ 4N#%<+aXXf{s &wq밷T:&Z3Tgpţ8 ghD4dX`XƆ|"\W( FMd㘋[Bߔ4/1%AX$QAE*bٕq+TD.rb:U' 46E)BQ#FK)A4$9F)KJ@p:-qR*KaAia%`|%z /Wht'@'~t돫>1Sӽ+#zfQžwg0M/JI򡊉g]}0/4Ubi^pprmrjaas8UF,Jl\̧f1ɸpDXo]M:+~f儻3*==r@1+=}r3M}_p|ڗ,qsB7/ƫb?~?ú8n_`Ϡ_8tdG?_\Y}pތ&]>"/gpM45F&isˈt0J0 gFHDr*3gr8Mpܪ:O^vt NCFr&k&Pt߾K ɚ~^?~dB;_fc#]lnx[ |]2~+}qHKsx8Q)/dx.r W0p߾?ǃ1xy/! >ώ>0D,2i/! 
xj #Mʊdi2xdTT;0/v.ї"\,#gTSCN8/vڐK{UDf26$Oad!/?B}轎`6n:[>>QfInlT#d s^!{gw0rwE,u:ǟ\%5` ~na⢬s{P>|B  4wHaL*lP",FH\YF1$9pi)$ma|0,&KȫLRs!(e0\gs P19 T ~**IRr XFN)ِBʊP=DmS'%%󝦰;bИEb)\<̋DD{DU]cGұ7ZgbŨZh-úZcMP DײK5y^\mC H ضLjH6J#XܔKZ܂qCtkj'=--x1UDr_[ڜ-9Hٲ;o~ g>X*daQ )7O0ϐ+]?<Շ=DZQqR!2{{/_-> 0]a)s؂ h@?\O~wzzv~:|*W?N2uij{x|FЌms6cvY{kgW_j ж'CJpO]йل\R.O }}^䀾.2&l-`μ 7Q bԥ >sϹOê8XL MC4pDV0G:'%&$1i0!@2Ai@,ՈpUBic4"6Ĭ^;?߹M6U OwL#{]g;#Ǖ (2B ;$Eʅ81R$ $ł8*L̙6@rZbbfS'c*7ՄRm& Sځd1HDd,T ,p$;# 3]` Lp2II$AuaV:@8)FcAőzp 2Lamlgɝؙ:^`҆k0oC+'C),4ip K),),9Xr0^0ARKa%Tk%8 gϮ= Z3{JiE SkϞ-^aRrNwg=!Iem}#TY?(̷^H 11"i|=[Y9]3OBH&0^sTm٢4!u lGX(Z 4.̹O].MZN$YF΂2r*\R+lRJXP4ZIo{4#taY3'*\"᣹3~ޓۻwL"Ѷy0Y e'OE Y =\wmU*'O߾I#Ȳ-@=YJ@˯eV@`呔Gk(<𵍣|l -fw v렞Ow7^S$:?~4{EM'J(,B˾S+bpmwa_J>ZX%6"RKl`:bQ ;q^%RSQ=.|2g }+&ogt,LݣGAn^Y>$, !r^e+35I*bnIYH Q唔 ڞ')an.sTHTEEH!FsbSeM~EZ-C*>n11>tUKY*c srx=D1U{cDD`ڷ{iW L:j]2F kp )hV"Nxųǽ UHA"|#'\/V>zhlPD4A;]n!)Bl~|xy |: ׍Gf L^D)^>k\"p/絣Z~!+&* SS!C{r_V=* $ZHM(Kgplk/\r?_҂{|tj-X b`=7nUP YT1 1 9E88`_uT nM/]&»C/A,nz)tk[ӛ~x65^V: R*ӤƠWǝjEMcV|(m2Vlh E&3f G׷jc0D3xŰ' S1fGl9"D1J3Z;d\3*USx3aL/~ #2 ("O%]6MT 92ȫ4zvU3G#`Ȍe!~w؅_D GƠpPz/NфѺJw5!!.Қ8 O)7jud%lH҂btڳC>H6!K )J#K-33Vc"䩶.brԨ}ǯ;3ec'~MveP`=ه>!yA2+ IPl&ֵtL*b";ŚpØIVyqaKH)% ֘\4=''l+l~} @ʽ6u]xpic$]obW}yO `fsu6d~o3Wn561^*nSth8c׹"|fq?*;cE7ǾzOϧSՏnmO"ٓnɪDeVLvf3{ڍp*ݚb3uQG]r/ewvkc<)zsy(-=qsl{ D"'7,nO1j7sK_y|`?K&`r]٫kKVGLIUk'eK -5$4z"$xs=z=zI^zBKYcV crM%XD_"6gkIJg2?a37G8~ 7G8^f2b\QV+SyHcD`"D])ޣsx@)d(Ś\d v hZ8B n=uS[28J^x Win9y)i:Hi̍A3 M[ij إ^ˍ*y-=Ljd:cjzӯkqA@ /%i6,SPrDhe$J%֤ɘXdiT`*-MIR" Oh$eG. Ղ@TŅx8Y>ZՍAF 2$#zM3-R HQ>8 e1q.$*m-wiԑZ .=vJ6N:TVBDMPx,L$cԔ`D ]0k,8ۭh{v6V~Iw&%oY?&ϻ"KVTgߕ-Fjzrɓ<=RknnGcmr[CY ,V\ ҶЌ/ySKy-P]d4A/_˼PH{:d#p/81J0] /izmCqЄkTr(Rwn#⤪\rfT:2l''yB|u^Y0ʬ2w&)@3a uo=PpyKrQ(汎/皦CoH 8b))gyí[ \sE)hjln(4b "'O)ockI1#.(Ewqf||q.>Rc|7zwُwx8[}t鷸W~JARɶ+{JdU{>[ u-h @+z˓R}51ȩ01IΏ);Q_hK'2 3H&2wUqve?_t&#,0GT7iG3 U:5l9ڴQ:iKi&Lʥ1"}"-5(SF^ߓcČϻOw=oOsD jF_$ 0عVFUӚѻܧR(HؓKPz)N_K'//j*>QRujέ[꿂Z+rO|gJ"C9cL(١[9&ñhA1geB󐡀 B3ѥE\dpA3Pu&v)R)3ɀ좚E.eGÇ(R\usk$J~w~ȥmc+ \(/2)2~| ;xj9s_e >崨U~L!b@.o+ǜʭ#/0 `L !ØPx FƣZ<<kv<ĝP,:c (mfFkUz;1>2w^mкL(e(h[oLl9FׁCΣ\SlTΩz,Iq$5c)P0NhLBi@iGazKk4/*>QT BH?2X] 0k)/6*E`L25ȲH3yGrNϊ v4n{7W ïQylhiڻk\7NX T.pyt ˭uEڑD:$9c0O6b)+{1{m3wŜ-GE8Y1,w luuū/n>˛lɏ)o|p' %Z-62ѷUNRȇU)f^U}Q4r=<@ų<ݛ63f;3rٌ%RO !"&42*ctfKqp $iPMjE*$eeS]K}7cu΂Co,/uˇE~iuyr!;bcEY!*'uy`i,쨌C]57=q9[KQn>I*+mtOAQ)wK&B%3mR8>; =v@Ջ'Z|gq6FhMIvFZ)_}Ic+8o=oKtfӻU3x^ci]ˊH[bC.5=.*Dwߓxj)ݞ`vJŰo[kl `:J/t^!~/egX(4smLJ ?98FOkqeTS0pkNs{-Kfi%Kg:gs-4?j[3%E|2_"[௒SWlF1 z/#nAEh5( & %9-_`/?$a>?_Wzx׻o?İ,_MsGU㾸a 1q,NÄT?{Ƒ =(}jzb' *3(Y|[o5%Q#f83ФĖ5W]U]]-iri%UDx>(mlkԚ[!!Cxg/wWͪ`4e2?^y~kqQ҇5;^Qx~3Fs)\ЬHG#7܎i)Yc~⻩=Xr3}mj<8=F榴aEY伀WTo{Q!&^}k.n}w>Hu.@nBtVa.ud5e}F-i郚I;tp?+N/phD7ul\N}Zr y(^_F&0Rie^QBLShdB*S8&Eid8xI}iB\r!hwW"duN.wFG%^&H7LH%J"@Qt]dJhc)RlsVi"'mztxkH*8UA*;t*Ih)@7O,$ϡ_,)вoV.ʁ4դ{i< `}~w ɈV/Xoa@l S0? 8ukڛpbTbu͵bRV;okC6JH_27EE11Rtg4+b!t`%tU"9BCJ߾OM )Wɿu\%7kOՃrH Q BH TkKv̗ VhSV;;!Vc ML -p [ ӌ>e * <}O}L2;URmgx~9ZUt@;O;gKUi[ѡvu|Se^q`,OCoX]UUbkEa1?s5ⱕm|Ǔ"DɶoAiN [jbRcoK_[v3ኇ((c}6=%8M򅠒Vj^#/#{ny&hgJWc)Dh}!ZFDH9?9Hg.D*Nݛfj;!F[]x{=Sޛ{1]?cxߢ.{ϤPC8>;qB 4h; VG:,b h>A.Vy?[ R dNR =J[- tqWx_!gbAL偁t!RCگs7(hi:srN\6 e A:1YYܿG 3{鈨gwom|po32-qoM`0/=Vu/m- rd;[Jk8up,+,E%~c0.BMM& -ރh!)U?[gfU|S~?t_^d0Ss|B spP(y2Aa-d nFQ^)C Vהi0Z%1( yH`SL^?_ ^_]!Qi4Φ4m ~="Y^c8k yt`><Bm+6|#gGz1`L^P|(h5Rz9@)|‰\A8P7QiF69-ʾ:;~.m)%gגU ޽(Ikt)yY{~xq;*磡ы#ihW2PŲIeM'CG;s&nw+?+TS^kbℐgY4 /sz*SNo VC0"E6l ec M&`jWU]0.Zh_EGRR]- !TWV$[E \ VYo|FP,16ɥv\@ %xu+1OtE*ĕŕDŽho_`#8Fh$C';+3Ŋ6FpRJSp1kosDyL%V5ܘ ]J&?sq/1ޟ\"M@ߍkOOJQJ:fMq\R@y*5>PCN:Y`< S ;񕱁)A$]w7|?_Cj&jJymB1 MJnh!P]h: jaXW ddۆ= JS]tߜzJۭVGR. 
Fk6{70q9`5@utlN ˶}5=%Be߾:cRk[KSJ (xΪ 2i671MCE  1)FcC}ysd ^䜍<BiI Κ,MGxp'hiC675m|mfնͥ:+]ўM}$Xީ_BΥ;*QzވF>6%9\q#/ɮVBs!RbK` 6i|]bH[R-f%V(0'Wߦi2mnJv_뻁h FЃZrD@ȸƎIv&yࣣ5$$'$(7=Tr\K%,5 &6ȸnښ- U-V:sz|Wxs1a.F<,#.qDVQ|z3-t+^1ť1`'@CgF4Mqӆqcqu7Nb%W.`$1LYR,.Zv5d| Eai!t:C(Db/SKvi"5+%[$)5Ph4h&S8(6mSF9q'XX4eЦ0dHƛhz΂i`tliD:bEdu :лc[sCсhQo$pѠ5DQi\ѱAZ,ZAzP>Jx݂w]]KM˺+eu;su;%Ppx;?cg;f/&Ǡ EEbx\KC=*vNd7I.~>} `3 Pxi>zR6&ˀUW?wE.rw3rwjcut/e2k 84vwGIM*BtWEcWH`wy񍸖D>Wd oWLB)֣`e^ ⧑o=蕤_L8]0*]t "7ڡ+- \_)dD.pF$SSD$: >[OW8|rFfBBpxi耣9qXN㺙/]1bYZ ^/yBK!ѭ'"&幧|WE8 (q5?dgXTxGȸT68.yh:#QT6Ss*$!O =#oIFJݻq y.qg눓jq8q[!qy7$ɺmw $1h?=B팊x^7΂(N05cD:[c;::pt+d 4+Vy7ޖ( }~2O\?yNM]LL9]Z˒X]Q|BTvh0!%ψ%x ^kPL: xpa[$)u`&%`'JŢV֩ KUt F!#w %iػFn$rfC.3{vw r0ⱼ$Y~Ŗ--^Ė[⯊*9#擱zHI[3pN#y#p*"ΙHyp4W|:|KVjGf6?> Ɠ[ &]>;Wd5jgyzw~uZzܶt<سd(ϭHmdɎ'q0N/^/X}Hr۫կM Uqd)[J[M5Qk|WoE3Xb3n-p6hOG&r^!D Q@2pr8RD`5 s4j➋asTH\=k«ۘm(|o,J1>6WxDYܓEHj#'=#W>v+Š蔾v;"Oi̡[Dj>$.2EI;P%;Um5K N0gA+9%B\ơ jF]X-=!5pX@X`,B11I JUt-BzŽ,V7NAH9jE *FhY.$ո@ -;(@SvQ8"19A:St2.Z*.:+!!Pnӹ\b歎h訅 S" @H e*94g%IՄ*5;+цmC- pmZ|)b hM+DHT{ưaLP:tZi<=h˨kx>\f909UL9jSǻ5C:{ssS3j~o)r~1Ok\@mAwD %зxԒҊ.Pp,&!Gqֱ, uq4kE! qo AKghd)) j4/bfg6Zؠ!]w% R& T ʽ|Ļn+e{NGXВxFH WG)rY6y֫@Y4y0Ґ;0L̓HC(‚:هk9T3yHBD)To썝dSd*_ГJـc-l s%*JM"076.=P+sFOӅ5 3%iu JT3b"$  2ܑ 񿠙p "_aT lB,u -3EW7*܋Z^tqV|՛F:AG)E_3[#<-5sMBHI&*WY&Qʘzxzr/6HF3D_|WA(AN+G7ABpMx_䤺V#U) T%T@Jp y<ɧ38dRɣ-Iݞή;EũBJI"H KT@[E‘h " *@jkJiᅐNHJ/8"L3P .=7 VjjH0oQ%>vXD~d"~Xs~3ӟ*Oz<42L"t ˋkvc`k巿1A{ EܔCߟO4 b5HF uv)͸jE;V%x7G܀HNof:Nl"q`2E{:c .~Ǯ p&Joʠiqn۶zJͥ^D(j7yE/ta}G_IzvuPdMgAJZ.f5FF'2uq loĵ' D E%N \D6eQZAm 6Ŝ'A)F"3hł~47\-%WHb 7ΊyoӠp"rjiapA$NjZiZ* ]{tR,. eAێ0(9,KEC J ncio-4_RIF6BnppKSd:kl'r@FK}h%A%'Ѐ@34E΂nØ8o(˝Уݖ47Zmv ap&1אX) ?'}~g7aLZ'qv&}46V÷}A Ҁ{@sQa.'/ĵa[ۻ.Po0u>YOFi1(z_g׾Wf+!RKkOMUyomMd1M:G1z&4}V~5?}t6GQ4;4G2Gg_b|rȠ8T31w?stN_~lnU =q9sЭKGd9 4:uS?}k轍QMÈc^P@oЋz-YHDlź=&T+Y;=ij"'9ӕ^_{my+PRnܲіZ,5xw4@A]g24ɟC_g-Vm]i?/%?|Qѧ Id!OVSn?=Dz`JǢ m\qB} ljkt?qBS諎t7_m8EUƄM+Ha+]oǒՌI=)-5S9e@3bJaD( F~Ezጯ\n蟼{&-&S$19͝y疱˧%W7W>枈 W \#JVF.3 q};JwmHn*R^QnknHzQz- :.:4SL0H#"o8Рm*cEnxa\ka 3zbP]?R4\dȉWX{sXO 4ŘȇgiOSMk6_ԛ-E&.xӬtv%"Qdr!Í86j97k^ mnr lq:>ċitDz{9"M5=Z,_{;rQ;9?v+o?R!!p-)Ŕ8Ӻ.kNr>wt@)wn뮨L Ą׫n@x9y;/䯗kl25oO v[x {zCe5ୁ5r`28Rhz/)SߩQ]s `O+uHLr+j:&y/R]8t x=Ei >g քe^ݑ B!=e`ԂP=8!_`=6(чCTDgaJ+a2WP_2YK25pu<|m#e6<> Ɠ\<=8i!K >O;W䙷rn}U2z!ckO$Ӻ^rLDVdj*2=uA]e)Iru7 R2.> I#as鈦QD+_ŦPb zk{XҥJ?| 4QHߓ\T eM<܇kE]gL~m8@Og0"| B\/kaz>ކ#h l$O7On9.1DHd8c3`P65,8m[ ,}S]uşF+aLeHDD zƠ''(_<|~)bF]WVe3nTZ" 7ҞpIku&lJYR([\ 63;!@ȄJ#^Ivʏ'VV()HB8 ˵b<wpAa C)W2 )%A-E 54GVlScD"2{yɸ Ѝݶ4OZI#(vxQo6 Mؙ6A!ʈ(+TY^|j? zo$qIK*Q8b^kiBl?G=u`^O8Bة ;ٽl Sy(0n{6._$mqâ bLoqnmjU ԭXDd-и|2 'my7>Lo6=XG=r~=caїacXSϥ] ])BhB!'Dކ=#V D^Ë]lo™~5&~70JyW8W"D12 ArƆ,!1?"VCm0~؜@ rؖu^ˁ`o?F78?'6JZ)a䈰M66 igbˏ\Ō}IOL\ vS{C`>侶ɝw;F 9<(+ЌQF; j'cT4>TؑˣDёL$19u(u层*E apAggRдuڈ=j^{ҢIxt&p}۟/`# *jRjѐw8FՐ 1! (۟{7d`Qp-ʖfr8F`Q}q~C~ X((ex\[c,rk<5 aH& NϜ[RՁϰv4 #z:f'h=+ v*:r{up COANd1{ńF^.GkGL@|sC7:3$r .b 6 l: `>t+*ꩼNگ9t* s{3SCa20Hè GzñH9&xs^=S=2m?CzCU7UJ Pȑ*#iF4" F%n4wrLf o^G6l;Fㅕjr^ +z}J\.Nʯ+߀Nmm9ϳ{1i3 ȘV\z~p6=|W|}#m6&_LJdIB wӂ@2w BI]xHP[4ծ\v áF&w6uZuRh'F*_$aS[F)B)KL=N:D +|Y'Ke%4GU#Y%xlu3D&Pj&P6*\*[`"G*]aց5A(TFGX9,' AGʘ8CAHʮgޞjb=Zk,u4e4elX2 M#Udo>.[9pԶJFr|SJ@-UQ1Q X( ?,osoQrhHSr UI]'ܢ([]"$ՃPba:]Q UWLtYP+8TAJ(01pFƏކWuhwL6;~XֿE P T 8'HB1۵6U΍Z *{~J)!4WGoM8H̩QHebHeʝhwa[L>pHS,{.$k1jE/lq0Q?E0Ӱ~AV_ϹM.#nj)6η#ƚۘfTiuUuT[?D@BU#c6 KM C,/G>": lC~!g~2: o,ƴ.:P-e/d͑DK:o=E7a!õnjj?$:pK25~ F*ir} AOd^{ĄL|f~qa첆Uu^y<ɐaF%طfEn fvkdFQf­Mo<(Z{cY[I0׉V>+lq%FH kbSPDraG! 
|OZڑGpVf_\7,=-_VʟgglK^PJ쑜zj_ByCQh'o`Fa'Shd#|khW, 3OmMЩjoOjӶMg:~ T_^{IQ{@õ~ ҉;)gw7;7"/^]< ʡPI+@VT2JAETU;v[~,̧Ԟ`TUh첫vÙ0ea -c1A{MioOZqe E3e|olZ~>u*r۔MckwFc 8.8^snTcGWSRLOs' !.7($VԼY1tI J\zm< ^(</ڦ%.1YxolhN@IfnѻhN z$?^^W_R4\?,bAm{3/82RcT03|Q: {zpTaH;2k/scezŞAـ'orQI9d7D\ zq;llWKRU?I'"%8MKӉR}o+FV>LA `Ci1g6أ+mgW8L0Q%F%09")KKs>=-Bpg}f|79FJƁ Zq&NT* 0/:`7->/E~&˹*4|9Z0~eǢ"|~L `W1*&]UO30% yʒoQ&{ǘ`TTBsdDP|X"z~6 otU$vlxOb4xMmazi|dW^ti2u/o=?@(шǣܹr*ʝ(wr:?,vXLzJ&җc` Pq3O=(bFPNv^ɑ Rb㿤r('u),-eT+@ٻ6%W9~}`ρ ;y}Pg}5IisR,8g]UwuUOu}vKy1+R6fU*<iv3^jz`{-vTP{yu%0_W$rt[?Ebqwyqѐ`9Dťu&~EóS0|3އ?np@g0EˡA1C#St}rQ&ymcIEܮ'4ۙ*vwJH Cz L 8nA\vݳIlj ۝t~R}trɚɰ6b)rvg3*@; z=jnuڋ=wU+b>~ALʙ/Pi ON!X{H_3]p+<8&kd n:p-vw'}>GުG3^%阶6&h][]D[\uڄX%V'ON'g,v{\`7ѯAxyZDARGL'}OMՃ P;5D ܟ'H8o^} 6;xu}FaרZQ0eɄּ.f#2:wLa1 qkA,}u!#YUFti&(\6@ dQq`<:oCfۤo>)3<>OnSx=YSkVI1<ТbʋNU^Ο_y"Ԑ;ܢ_׼6(g{쁻$3?y挰TV4j6T3ɱP -%MKzsIT'mM!V5<հ/$TLv$BւbXcKG"(8N9J?"W=N@#K-4C=#7QbR70Hb(փP xIEinZ֑dgMZ'#v- U}u溊x>joȼB?qNkz:u>xJV!%tvߩYۤ>`N!y&:)ƍWs5?%JW۽Aƨ_Nj!KʌhM^|m5hro-Bxȼx+W+uclAPzKrj󰲢8`DBtVB!`'%5~VN1DД } N 9a*sf1B)UZOeE)JǏ\yc4aIn2am۵݆M6sd{T<03B={4>Щ̵r[ `T89|Ȟ抢*RBV>.WrZ85A%A?e*yP{q\"JBxJ&RDP$Idž!0 '-:#-(&LVƤ F l53)WU5Z uTuCY&kt*Brp_F,eH. ۗ_%Fr,@ Ȍ{ьYep.8{a6mm~_Y>Xׁ E0\ hdѣkafkE( x-9x"XqK2)zaL_й} .LRb91v+:饹M,п%#SQ]> l:hᔴڊ?2KJtJ(#D@dJ&, k7LŢ֎%Ʃ: dī X Aq'mTgP9鮂?ԨB Śs)0WgiB0,R9cF䌛@Xt\9[x@9ǩ2ȽYD>E/vRˈ(͞A9(& 9ЁSXsRkl08$1, vJө:4$Xsu-MUoz v;;0o! Љ cS.1;aVкx`U'#ģFe{X>Erg{k`Pg!I.}= C vbeT*V8waHh҂ H e |w8+`a=֥WI'JIidUΉgO%Z qA-r`AUhb:ٹAp+o?N b0}3Ji|5ܱN̔BV꩙]K%i~SųAu_w tvSҢ? =۹>v酡r*2Ljgr*8UOm¬ ¬aPK~+RkEB(0Y (@g_-mRb,ZZ+_~9mo~μ)藽[_N 9/ukssKMp^̷^lΛwr`˦N/|ceRM)[(/+mʨ]q"1ebI5>+dH=VQ|yW84Z\;ȯКA~ \sT3/8(KbgNp^V\Ɨ*+8^cSh~}Y "eWo%@Jս DKF5-` !|\z<&R7`-zM{?dyzտbf㑽Xw}j*kJdWʃC \UH8mQq$y֣s(mFES3kA"Z"#Xzf+ȝR%JeTr1V|wN8 /)͘ ,V9l=7#".`FA ;,V0/bx*h41k_)SAz T")xoae[痫n̠߾"DRCD{{aXV8s{ "G_˙k%|Xf48W}O'/~XSE3]q9I=%%  onDŽ!N1_ 7qOJihs$8ۦt`u#F?Xx&?;Xv#2q^r7QC(y`SL(>a"chXgB&Z1vF+xc !'Z?{WƑ෹3-׾ L8M *D""i4Y䘁XTwN,H#4z`]6(>NP+Hd `F S@irFVFiά݌@*MU~+mS~tF+%2Lm( sPrp -7MPd}@##6^VC0Wzkw靉u97(]FHmYtY8Wgѝ$D42ex5:g7@^\r'1פ;P#=? 
'9qYʉ;A3ӨSxBNpy iENB |S0ףO:Pyw 39y;"vptTj~/fW6(E%#\\w<}.ä'i4*ٶޙY)ض[.흅[h蛅ZE%}p=ldƅo'򤘷̇gatu?򳝶.5ѯp5~ȽcrԨKp)u9H(*C##*tghRRb$m%?+hO~V_jӜV"U|w/ҕ-,m16 Li:o/51Duq\pʃ 75 ԝ  =O:UYBUuGȧ B3yp+MYJXk5vR(5.:NS V91څl`^*fdY&i`TdRsđWݙ9l'KZ7kUJa)kŽm2 WãÇl,+qҐ Ԓf|i1o۰ݞpo}-=L՗쒩ՋAr4ډ7}؍`l?:1.iN iK $HC#,!EQ1P3hi MfDN]y=ص0<+6HRużR{X%#mZTJ7;;Lo 2y&a)f38N$: ȊLR.= | Wo~@yO&q^` ;J-V>0f\(uDž>_#ztMx1:ꑿVKI 7bERC1OU27(g[Č ƚH{Z:KS*{ ̍_wSZ9ڌT@*mMi|s>W ü ud1eDO ҀLCY H?QYmrxŢ)bR)G-nޯQt{o%KJO;?Cs9L1b/+@ y A#Lz-+*5d RGgA?~7}cBŐg ;9?d4· DŽ2xk`)sAQM"Xm{/faxP_l^nK/ J\v?BNJC(5a{E#4]#}k]v@hPrkv(;1טۚ¨Ll;l C%3׈l@-WӢ|w| |H-8 ”izrXW͒R̻| lnl|θhdZԧY73`!G7sx~`MapH} .1)Aͺ#v i> ӷQrTTnnw&+ei ?)[=E9;rn1Xk>pBkýF˃Q+`("e6[)Q$ QS*ٝڈ´PDDŽcq FBQ Ñu!V U ;4Zqrv'>բ:Rl8u`jIFbp^ H &-QhFٛ di0U % f4Wʽ oj E18;hbqVF&K*U&/l?`&&]oPYЫi\'i|TϠbG7pтy$ dxaA1lRK] @Ns5M^P+/jf:E,xV?vËpfkJWG988/rE6uYWA>DJT/x&UoM.&wā=]@|;|; DRۮ;yLˁoB5Bm7ָ\el V uq $+ט1qKTy*ORD;?&M:>c/HU48\ &Y˜*/=~m}6'Á  &io 9*YFXDoFnth)x~_1p?M{c!ͫЯnxLs dH,AVp#B:FrMcBKz/B葯f +cfyPR(h$DtM=ƞ'F,f<KtʓDF LJxPJy+ܡjRk0g(0:zɝw Hp 6J( r=R<&(Nl=Eu{K'֎"M)=g0#X"kFJ=-2@ffnRϽr34M`򖲬~RWqe@x~%K^ߐ7YaԦADDῢӫg`wƓ!8 /oq1sЋٟwf2Ǣ[wF ~|hӳi90lńkgWԽ!KTʹfzM5q'2_cR\跅$'4U0 Fdc}7˄y1YZ5\=8巰,q}~nسCh4']@p»Hk/[~LN2xZ,}Á+&]B%+s]:uH3RޖQG sAE9 )'E<`<!8QV0ꢗ"b_wW]yn4sև۹)aq_J=HC-O#1[Oϖ#r4j:Qa?C=̕RtQٛ'9dqt߳g@xk3[fN3ΛQnIȿ(Wipfq2nZ]K}-U }qaa%؃@=U^"|b;u3v1= жg.~4}~ЄB~sdq.r0@!Q31A'Ku)92x4tz)ͨpGOɻqQ`2#FNnJy0}5%jDR}C8{ iAm]4hI jo%o}/ J.?@~>.fP v>P5^l>L!U(;}xp]* XJATCB QͥZIV< D,lv(FEݸSW@SS- QhI@9,J)mtzk-sm(AI: 0gxρ@*CpGd%%kD XH6IEͽ 8,kk@'bݴ/1׃P1D0<:ŽۊAEKe48a{lVEa ĝ mA!rcN3 6wOAHw" Q+E ,gzȱ_tv} A@7d d2tf0}edTʉlj*;(HN5qP)aqM*ك5OĚ!hy;P hAO p:ʊP&͗=JBmHkQ ^,Qmb\skBDlIE"OȦ 5ycT繫Evr'20o4K'r^g:"d yƙdH/v?ˮiYPci~QFtyC`>+7;x bٰl\*qnzǔFK <3?"q$Bq|kg*?, !Y5QO%:cnv! Xd2nTRSe1@Y0'C-Pq8VrBI$ .H꼧`@ s X! 'cdHZ"ʇ<08228!]qkT\αC K͑aM8xj ,]-!Nι31mf=֙QcI\f;n$݊I*=0kT$Qkj+ytJ偒]Q{!o:|ҰgT4Z@]`&.Q4qΌ۔N8'g OZ>.KH=k/ LjM2 D>Ng)>.K3 VO,ĽaoɬpGln@1gfR|8-$N,G'T5F nq\͑Ղѓ&%/vbQ% z a ${&cHLcPTzlo$fJ$f{'2Ofn\!璱* `r)|& b%َ>ظ%s =ε->$JSXaIIXaO5 #إJ_?IpG߸dOچ.{gL>pSzc\9q>w.Zcd]}>Ç9찲2t(,Ĵ#mtL[;q/AɮVl׌4hCK~RSZCT1zfռs^t:w(5F0ap?.в_$-rO*l 'ШuNT&6dA0 F $}YbDq#1@:4Lj`?!iJ>YIt@ho'1K| 1W)ǷFI@v|QYLKY8-Nq(.s =?קU!!i[e?Fx!DdHf2XdYvGJjVAJj>rfjJjaIM:CR*U~!|@O"km>մ!B>FlpDxaύGC :oV (T &s86 ͤ i IS-& 挹xbSء-u9|6WDI\?L.g3w5ӳrx-xJXS/a03p_}\-},M .khI{&:7*M܇^b ̙̂4M[zִM-ff!.4klev"t 8h ڇNE> :q>z 9zU !|ޛ˫/r/qC  zU﮽[1N5Y#xS ״8<,<ȼѥ^/χ!G޿h:-o˫g·)Ζ_/la;5Vo < ܢmmbќ" }{py#&)aJOűQJ)X#uze_t"-*͊uEr̬E0x* ט`,FhM̙8#kPDm -e").[EJY :]n1Wnm#8,}*>g>Ši۲SX3&%Hx@cID# "޲F9ӊFg NQχj֓+[$tENJҭSMt¬_%r5Llk.7#ߘ'z ;m?G5 B68{ZEן.Zzj=W^ivܭK|.ZQQH_#@Ͱ DhI5ʠQX[F S%1y_SPQ&ޭ[w]18i; R{<0PR{7?" c*E^PJxqNJx{"Q3aXd Dү.#Hyl Y +% c,4{[hs@K<-qIy9xVݮvz 9䶙|W#3[Z &g @MT&h+աKq="^ڣ$Us%ۡVRJr,h<)\\™üjf4:->%~~p+9/;)//]bH C 籺[ag c#W|B1x7qE%W||nI L.W6jٝ0>+7cCFe*XT: {29gt 3qxkcmZ)#˓%ZT;E-iuۑrDT1~Û.Hk3GLhZѷiv$}9k׸;(ٝ|ɕ- C. /w- : oi(*&:!@[Ec$To@8N'[ㅭ^o):Of8.RrCtk|PPXh]xxǡ>f5_ u+uͮ+1R)fIisdfj-\gE5kɿkqmA;*wfPď\}~J_[ F6Dz%OnGP{R>am;բI{܇Z&{WNDWoJ*vo߇'ZMo+ k읟.?Twa=i4\2Fr)Es;>pElΓ]X ,۱Ga}sVt/%eOv܈EAז|Q ꓽm'Jғ) َQKURTqq:VVc믿3?Yob rEs$yˏ_W/7>-xOޏ`Ӟ!ADdz?/0kIf1&'^X~ z*36GS;Wk.&x>Ez. 
.VprBBsh}1surKڌ;HpD"HL[qFPޗ璱8leʂБi$mnIrځ#8Cp`qyh i+g3+@9kO;sh~")qGJQVR+@IȌY@_7I,R$dEMe_IWd߯8#K=R=23#^==%_P(H?5B1ZNf7n$~ z,2m{U%J |CLſ IuoIdIs2ZeT!A9SLotc% 3Bvli;r(P戱 &cYVpdAY)#2!ĈJh 4&xZj5s g^= a54A ɒX 0QZ,5O d,$ΙlmG/ɬzܨZuY0f!R4:jH/߯@*lxS#jX%ǢTXBfUO,CG䳹 Kgk1a\8wV&^*f >xrZʮ-q6ܢxS>r@|0cy;La4μ'he򉡐J[ɓ7:g5ɵR{r1Zcw0(%Ns6J1f=0,OLpQ R\>۾jV;L\tɒƽ5(zqmBݨPUoL_j}=ICLbaSvc$T[!)dOi[.##q5e&zw ;O0$>Zڍbl_vܱxj%eDS{O _w+"X8ًi\-E)lotS]o80mtNQdtn'w8>|(G4\〙d룝*FmsH#v_ɲvCoO=nWןVۃ_}> }1K~k8߳;0i=ҭKJ|ưOF  ݥ@t5LՂw8>|0 J֠Iopi|+=mJ8xB `mlL," ;(WX'Zڱ5q]1þV腠'\|WUo,V x2L*ls,m^ y0Bau_XP1hנgfYO⏨B&?}- c1Z3]4lTITМE&CPU2@`tk۲ (%1[R?Ͽ맑UF]д_rm;=[I:(UM(߰rq-l޿:#kƧIVכ?i/Knon<pb HK<#/;@C4=}2A`I%RX%oؿ, O/OwKa1Eo/`AR<-FSww.N~>%34~XҚw:zOr?1z ^NH*=wbzˆ@`8Zj֍]uq&SU48rSd}^OpzܒT}J\')2TL YzU\חj,= (Xޮ/2̜rnJ!-71;rxSi=pDaZ513\4J[RɜZo Q '"ĽdCڼ@qRc$8:墥䑫-p]m[8S iJ3287L&{>!ApddiuinόTϙ߈ᄚ?`F%YR>h-pnp{IqMːZDB}\EH ):eɪD:*BYy1IbR"*Ah T9DQ @&Jd)hh8>)*xp i4vK %aj8O& ^fMQTVm9zPblM/ pk-AVS*HA?߯WK.r~w\T}pߋ"^.{cF/d/є8Q6~{wԿy25Ox}'>-Fʔ MT]"c۹EJ:9_ワy-QWcyyZ!debJqoF! ά$H{iKTxG_E3> pL5RBƱsQЉ;pe;;xqV/85h5j1rq{ gd~Cr_|4ʷhV ow]^-~?6u⻳O?^ܵ@?4ŝ}K -ZfĴsp@r ^s|!-3j|^@6`'S/}l*qWQc㿨Śb]jf[wdc{B{;roY ݉NٍЃ7O]Sxm=0Tl==Ddz>2:&ֳ cz Aη_VE>%NM$D ~ż?[uCB3}K&[wI88Ƨ>ed5gُW5B-Bfllʯ&`6/Ej a]0$CVw hC8QF9nxn4S{4vfI+-zJHL ]R/jԪ쌤$|>Nϫ%j=;A-a9<'#|"xtN( g.ģNFԢh.RFi! S)~À>8jZ @`eϋ׫I8(_]08#Ƽ7_J"Cp-,YYq@ML&h&+@ijϨLr|0Bn`ǝ5Év=;|  K4[4gRf&x|ND0!wXxZA(S(}p|z]lxmG3ځU͇901W8zkO_ yF C!j&#qd88@fqFXq :Z'spY9'G"vZyx1[qG~_ƨH'6+I$dIr#rڡ2P>~MmU/5{hu{策bFfzv]ݍ=xE9Dw%j=nUǙP鈌)(m0*.gD'98Yk[+f[G[Snz4xƬiP"p)lDek/Z-qFP2 87WalV3iusU $>H~*BjG8+eLːnZZnRkBQcGY3Lm8 h +ejvwa6Aq'_@-fv5 ,0b< r4v9K>\_[`{2,WdZ2ubfRaӔ+c 5Sj~uCQ+59Ww`A,}OtjU՗hDl85^? VrZc_-T&]V"vD ț4D=qIJ@9^޸$X@]XPPvv9!Ny9!%q^}gRѽ .(7J%d! .}o?PrHa<غ2+}@|y7;a |f_ԯ>?I/k6۰&oÚ-mpL<$!GZ PqO|jp4yiRaAa)~Nmp4S/a>s|@ain@&s C 428|4Xǜ3zƋ9KObw}Q1omBW2Sreɚݷ1ho-O:f&~_bWk~Ʋޠ0WWs?· g,CkiZcr,IK~܂~{?FL ]/﨡=H-} %Jm×H_ƈMe]m mUwC/upK GƭW[sk7 8VsRɨ7cSLRD;փ32ਔcsowr}x@;RSŨd:WsYu:~^ֳIJu=p ;OL0B1_T{05u`yA/GYPŴj *[jTw fQivF/3MSYmw9i LRO#uOS<QaS\:/ԁ S; VfՕ?jw؆jj,x5RXXlX?~?FfTc%.;J4O4B]!T,0*)aW@,5_|y{Vwp鳇MZy UXԧb0G2Ȝ&T®Bk?kl+NTp\J$əsO:O: t@k .옐OvBc3$Y}iupLщgSc$F"Q;2$ƚĵ@`E:Tw֢wHrȝ-dٶ&e3uHs8HeZ>9 !dˆ։%:$yIDhq_b+MБ:Q8V%r[1.1GX*=#0cR)JG8m'yIh5ovEE za^8RHe Q[Gc7ZqIq*ˋUg㋞U@dl֔+?BF[RɄnjZ7]ǖ'T}\{YBٽnP(Rǒ[JPy\4+<7*Ibr2[l=!a6ۿL䦌sBđ` C>%(2RXPB.Pe1Nc~ nTuUmx5GsoBK '!QMGrsulYz: *z;vS]aL , OD31ԽPU2թHR*RkQ~xٽ}}DO?okuX?afe/*~]fB$ 'L (fI&f^I_,5S#n #3i=aF5V6+`{%]zn(js adNb)YSe1#eZ k 9zmg s;*'K00yc1&&5k3hD$Ş\3#D}p0a ^d^iv]-ީl JVɂoSd[٪"#ATV̜s IP"2*c3kcFRTXv`$IzAL*pO;iśˡw77 34_'m|G}whgUO;gNh߾ Gy$r2ERΑpHGpDc"@D/U!M+DӐHsqV 'SR2xJ9*&03M@烙 +[d) ;1"b!.9{q80zWzwG>dxeloC+$AqRRN;Z.rj8;O%Jy𘧹H@-gvuH0$OZ!~@C@D#?tSVL-[F+ vM9"g*@RRsiXWu󥢧GT2S W+j1"͸ M6~;ftTeٺ3X crpwvAc,jRmyPh6~ʹո*]گ0PPp˚Keփs b j vRj8FT r\,|W~JrFfJʋ;]y~zEIjΝjag k_j(Wx]dRm7K^j\x]vm[OF#n*|%} JE)HFAQ}m|$bwٟg (X8مԈBj͖-?V\V%=!s[!{rl \ŋeUƋeL*V8½V#֫t֮ }n)}SY-ъADF%#ys9]EC(԰*,{P)1*yurx00r|L+,k<8KXB10 Su J8mzҭqη/1og{EF|7Nºt?p8%l|>bnA%.5 HOޓZ׉kB%kuՄJ3 .kEѷlN\ͩVJ4cA߂j).V(Z.tDF ҌǭASe~=0w}!&UV޵57rK'EUzHʩTxk'/vm 0˔CR]f($"\hu4=x~\&aiFSn! ^#cYJd-8w~_,OE5ZWarв UHUt:^w (Ccg]('ր*v؉t"B;=/%AyR_ 8Rf}NwExQL_6\QRw?KtȾEcoJ8N_+:]SR"'fbٟàܟΜDK^8luyΞK,gϖ)X5? o|tI?}F)Ѝia/=25oo2JIY+'wqDUޯ6<"7RAR4< g('m{oZ%\$ 9s )v`iۻ1L [(>:ޭBS7jڻxzr&dSTzۻq[(>:ޭS[nS-ؔpqz݄M8w tBQǻuNctۋY<[ 9sݲ)iS `:%B+ T5tb/2=QB|$#X$VCs2{-!-"}dh ]̹Ea^Z+B6'Bݒ&PAO[1 !~+\{? 
`>x?&l?u1կ *uxXnEŜ!wj`9܂`r˸,/w>D~4ؗ~ ⍙/-7ٯ $aE-GqVMsAՠJS?c%\ۄ"C| v@ƈ|Z@aĹGwgRPl+@tX/*'Rb{[,:Fp*U!d7Nƞa]WuO6 3^-&L~;Jbsߌ/Z'kQE}r-juh#ML,F90rHX\j&Pi"&0S)D X' mfpgSf [k0JZ>ī ~f+`#3 FU5a(V;-~edSŬbwX?:Յۜfi,Y>?'8^=0/FճG 7K/;b`p@X<0KuRfT4(ZYB D:R c-q%T$ьd9cH1p%$M2J!%c@(,d `+ -k,([CJӜ*c<иI,Q@C $#\ X$6ZdodŠ 8lTxz 6׏I`^?<-Y}#Ybqm]Y6۱ۆ7{)(~kMvkoAj.gpFƾĈ%F)`hej+a׋ɲ7u!q:F?HQ"p&u.,+\1(CƑZq L`@"=\b5[kBvdr2)?0DJ&ڬ)OIˁ}(uA. \ Vu;.&֔ .W5!l-6 C_5lDT!fmWa; lՑU,(,B'KF}dqKc־Kmjz˭z/ޒ/nɓPr;tCZIeDdP8r& x7bz?x.o`f2@M#Pp0IoV=ILpɶ')6 ̽'4!Qfyʛ"{ w zpI}/P/K- +]+(sBG1Q}^lv~e-Ӂ*YM YUM:Pd!>(WgXJ@$HT*hԬ@4(Ne LSa2L0kB( s:9[sb [LrLF sSFlِpm(mž+B^[= ,p yRY

3IB* ħ]eLܶ kRџ-6qoG<G H}+tw; lͽU#7+zAB[j<ˆ 54O3{cwg'bB⃚,=NC%Ƅ݆uZ2shRp% F@yliGS1"Q<ʥJ2y P س Tlÿ^Tf )'h/⣼ q[ut@)ۣ; '+m['oҀ^(xv @j}rʦ3L3}lʧd4X'LL B O dB\eJҔ$[:$;;pcu4I"as%T=IpB%Ifg猰`0R':;[CZLjnޫkjCOKlqs&- %9mEeNi6'%~qS\MpW Ez3^wp;!@qε`Š>UOǾ;X9ʜ.W~[M#(p-|A^mh][Gw-Jox][^ݺňU[K ҖjD|U,j} a8ꕆڹ}ж(EBIUuUJ)Vn'v뮒ku[_6k[]y6 ല(q pm vRjgH-\>D31f-hlK[]3 @5T0G+wRzʼi,6MQϫ8hk(=糬lSi3ތ+s`PzD:/` lӦ{_!XSn؛^?ҿޤ#Y:{DܳbM*otsL$H_9gD dQeBDClʭkخwLbc:Ϩݺ|bP{xzr&dSN>_wE[(>:ޭS۟-ޮӻŰ37 "XM8RwB11gnnJ ܚw n1,Mx 15)RH㌳0ukoIl|X`6qwjM,wrXvd<*;o`#1v5ݱ8ZT=HSkM B]9*E=H Y{j%>A<x#qI#oJ"V gEr! M X{(.Xn㚉kdp;̯'zkUirzG@2X\D(>35aZcÚ'L`Rib 9Bt&(DsjzX=$'VNR D"cRTshdJBr4)'L AK2`r)޻bw]EZҤ qYs1ϓLc5ʾ/ۉ7}j{V\_7--mnl9n!V1QU?>d^+\xa& @'>WZy#Ng3Wj_دl#v8f쯟kH_zfhn'0JU0 /*$s*UCZvLߗ BR_wW4FRHN5bAc!zR MtSnAŒ93JJ9yrBh1BCBkw_2 ?!i-2P b MYVh0!E%V)H97! y)lWpR"t(/6>z/] wzÖPؿfŞ#Ҕj濕gr.h z+^T+r}ۋbDj/FF@uujiéj+B6uH*:-;&ƪGUº- ńZ[`(8+ްkוzo&?\qg_n3܂ X͌}L u/v*=Ɓ8E($$O S+` igqrt",$b^otmr@TTj:b$C#b^#I B Lt!m5Xɕϭ+cCY:߬Rɞgr'@g9SN0/MMF.s^SAu0K9:HK Д7]{yV,u@<&W q ^!5f}Z2Ze $WB+sEd (!idm 8҄#ZhqțDes <~05o<ց*aEWb|]SmF/w> _ S Hq顟wm܋%O[W2W*Kiъp83̜4oFX[Sjz^Vr_27w'GujL"GU-I[a@}G,PK^N K N.F:QB,WܑȍT(,gj3On /6x_>D==7z߿>X3~R)`6[-9i)$_ޜpڽ2`YD7@fɬ^,pfcc[aLZo3[ (u2oWn/^\LKo⭏'b/>d\]X#~i- "l<Si)Nfe} B&j}D9emuuҎ^IaC]&/OƝnCxZ ho<@[APi=0wnI?ܹ *U͠vū]W9h0V爫uy0 $x)aEN&+%њZHfRƃ hrWh$-N-5O~K}>El\^kWHxѶOY=8]~~;6: 5HΌi57rF ujFbFϵ4 9$ۂK^VaiYrm)Py+Oۗ:Qu)"DHbĨ!&v堸:?#,"Z;l3Ah93.*ݍ[ْْ-So+ Zq0 n=Fn=>18[G}7Sa֕j1!ViAd/- VrAۧכdtU0g|sMFx, r 8$#LC{f^={rx=#ah)fOueT Hcbd8dbO8]~IzK9^A^1AlH1^qQyuA/ Iw)@kf7TW|ݰ ecd7 xwmj#h3TwmޱpmƊ%8]svG[]ޝj@ SeyFn*Vڭ-)v&m;U WHIn]Hȁh`&D\ҟvkA贝v)Ԛ[Fڭ 9pm#S4U7vr9vkA贝v1tCGjp7e#8)Ċu-GWndyhڄ&w1m2̔2#VDR, &m!uPW2>+ۇN$OQ0n{Ձ,oZl%̑E~X8IF?Ky(QQΫu7Yf3aMJ(cܸB2L_y3CAyO"O}>>?OggBՓz.'JkWע`cXxu[O2t;-{[65NyΤfh 4j[AYœ9j* Vzm-@oU:փRVTUz T -sa5 ,zq=W&8N "kTesV2a)K- _+@+)5/7t}-8+zr=%-R1t f!^w|E"Zbε - m$ c%xưRIrc_p>v$DO*,F]+Wvz؀FPqg+>ϿWcJRsAZ 7~69b,IߤfMA 5Yɢj?VnTVd.kHOzXUݭS:4ش=4X%]ďj5"RYN| u\l*:[5"^; e!wK]'{4җFge 6#d)-9[FFt"zH^MGmhwy>失f>s2w~mOӆ9U ҦAp! [Jw'TpYkN4!iie!"jOu'5M袭E "}Uzw'1o;BX&LoNڷe-pL ʫ]# ٫{n$yٝ؝0p3`2yW-]>pAƎ*^m5*e&TMBN!L}C89_(Yj:YG XY_O1CNZXK=ԠQdGo*!j1xg2UIiUI'q: +GI:,;gO39 'V;{uX˦3ɵyiDM&f(x~ݫHR%plub↉H/+GczrҗzUBP%I'>c9m}T9`2f;9O^+<@[;k: h9=E b=e]ň塯~U[i76̱7 `ct$S;S_C;>Bi V+˦~!yꫲiCWHk+V2/EjucL1D8B]=;a#xT򁂐G9t^䅕΅?cȂ }' JPhl[2:;AIvi)AAfX0 ͘d{ʺ =̎vaZ}uԅfSrqtH|)3#$w.<cن90L#e"J0jIؼOw fINldhR`M 2::WKoFfJj_|]pQJ[kY{Bؐ >]ՋKnKp%Dz:f Yϼ. mpv50GG5X_zQ. 
kBadaؾ28R8pE>1y-1Z,8,Gk}HUO}; xmΆ@=ꞣ- l ȕI: 2Ѕt҅TMIg:QZ#+Ҿob-ق~*u+JP0\`{1 sGzMp6[5Y mziكD+M1!e$/Mfe,hs)`)%^[_|S$mU13rRaڞy4ę+^h=Xr-H ]`bLϾu$+C`:w[gkZ8ۗp p=N />]GPx/71<&ڟDj$ʟIȹ<M6^0S /N !+g$FP泟|X} ήi W\Iґp=k;-o3 Tldes*z8']tiu1;lr%1]8;qCoW菶s3VזV35jك5Jǽ{ j=8œ彗Zm@⪒9ZIQ4lPsΑ1:9Sv\Yfu3+0 }I Ì _oYvM4a~x%*njYmUL"c͠u2"dH4b$cXi.a 3e/]Ց &;'̈́u EΘu@)y-WHN-)WXC s]RCİڒkO;.b'ӂ61ZK U,VU {Yd$Ko()X͸4Q\S CxQysQm4Yz[/Z![n aL0D`6{Sp<9͍7Ťs bjp^pX3 1F "W ^]+I3DFeF*x2HK)ײׄ3ŠkA`/AJH%`KX҆q4x6c$e|!:p,o9'1 d 6b" :LAGg&,J?N{v_/ J͏5PaLKrtljV)0ȈڭT 4H+ls!xo)H h˻>}%”kibt$ë墵U{#bjr"/%RIrF9/%@%gZ„ : " 挴.aTaAp0H.j"aFUI%*!p # 8VHFExc{F511XttfTi4 DZyama־F%ؗ7j%`7h^IF#q1(J4KJ /%%,Meںj!ъiY+g@)LR {^c'ɰ95g43aJ$(Xe ʗ*M`DuyJg|z3HFf5u4m[2C,ZQ~s٭&5a^VĢm-2L+ÊIB[v+L{ۣcRg{om[tajQ8E8 ]*uYOAb3DZN3yiœZsZE:+VfiV"UlC8}NT+qT{TKH#8"tW)xK5ե}H[ +oJ?0`_:^gqQ$˚ `6]bG*1O sZc76b;1 ؗ'tFbٛmyʶs֔%v}7Sl{N<]towBzutYbe<$!?zɔ-v;`y*Eg wn!$QT+T>nu|Xr@BQI87;[*u]:wmd{s<{+(g)c2!lZlJY]cI.m2Zb1QH7(G[3 lOOѨta>P;q"6礵k:}7#6q:_)oMxx}HVz4L`Tȷ)_C k+RE̐8FL"0YʆCl٠Wʪvy}#+!$=N8\Jr-ċ1΄J>wsw\ u{F2۷ $R˶{|{crpG]/qқ~ S.[*D{`2p_*e\P_컃_6\&-Xp[+a;U6@% +1`f]\qi*2J0Ie$#NHD&MMB^+IC(/BeDu ((xܑ\~ Ut Enm_.$8 C:-=T|t:~mFCӀPgٛ5cr%I(;Ju}Hk^E<͜C#N__L 8 M0UqFUqF3 "c#1+j{v.j[<9UԶG*qf՚W۵pbP;أP{އP}\{7>!+jZ5[ xkxs\MFLs~.ǑVtXw  /n8qw>qgaȹwaFë g gHc2!p*kniSL*4IjyL<^Zy)4,RhQ kUck)8At/hKSHKÊ+% j 5ja 5 HZZ6CKB+xGfhMf"Hئ~HԇrK泈tq,[15H%*XFn`a**82>L3T%YR^ Z*ӗJ &(7Zeުxaf!,?}딎5JGݣhǥd֦V>"Z5TnϖDMMŽ ?1^,FGXOJ מ|lA]݋!җ!_RW?VȺ&WJ9('[&GdAds`[Ǡi{qEcɕɯ`EdTjk4-x@)/z2 73F|B] }IRULWF>ťp艹}p[ rHBhu6NU, eYyɓg|1q0lpCM btnİjn.3Fti$#tJ썁핱ӻ׷n-76cQbٟXf=yp6I>,`$#K@&op".%kw݃)ae}+"%S}h7~t?/.;Fv1ٙvqn!$Q*=55D޿ɱ>W@|@."![ JR.e_ SHܻT3eyl]!s+û~tdR}S $7$IEk!( dh~iQCW#ovNӲ84ny,6`%,!K*XØ0D_@^mR_o;<OvRN({zO<1xȔN*d)09l=l0+mZ.ߴ_.0Uw\hVU Z= #:GYMz;gAΉ .px$wʼn!5c;0Q. yPǩ)Ot>{[=8e^(ͳߌGjkv8b߬۝&WRR!J~:E[gM !Q(cb[v`"5L%F] /A9 e<VWSOӏ (o9y:0DDP &RSW7^!v o,R2e}DY;ޠA.%z1a.ilYelΆlB_!CF&PX,IƹLYFS5L FQ&x̤#YB/fľ_ ],vqrp6 zbq ;>@R" 92Egޖ20#|Px3xOxT7 rf:9)O3#k{c~s*\td2xJŞĦr.j.7Mp<ղpJlKdX?nja9qx]Ň֢t޺{@ xYB)J]ۭ_ƨoY /4Fcmcƃ Q4CՄ Zrp﫱]Qt4 $vU"eNK<(UiOXaZXX@T-K1))&O#ܤ{qlR鿻T^]z0buȴ&2})X {Bei L8Us S2`̅ޑREm{㙵FX.8AS"EZHJDy z @K2Cg V(MAWXǎCk<3eTxEK$򐼬/x5K VƵ|=7U0wcV K]m^)tD<M?v'+ ܂ox0?y`8x4/b<Lᕻ_ Wp*$[>;v~}CzMn88Xi\#~sڍXyOƊs&YVdWNRmz_3+Jyuƥ;Sc~\+W]]L\OYeoȎ>>ާt{O1UXȪXm|:gPJL_*Ei8T]Y5?_AUEj=XGțF69 g_f0.?{qCN@>n\ic:+nJ_̮NwۢETָOtTuح 8c sS|U<|r.Yif^ e[ap1l4@wa '•F7 R˪L,xř݋xLGm3Yd@~tJJOi:DirD"{#䠒І*8.`իfiY@Ň'VkTЄԲr7qoA=夝߲O鹖Yts x3ts9yO""~O,߆|"!S ڨ6+kkdPa ~|b!}tG4zYeN=6ȠuXV*Jh K.Q04f='Es&*IJ,TK,pls6+hm9Z0='7KlkqDi"6+Gyfeg]G(ۃ~jO VSZClwyX.͚;4r?iBh 9r D#&0u=8d>r1H b>Ii-&u-&1ֆ|"%Sx|_$-}Fv5IZe4U!!_fUk/Frꓻ8}Υ׺hvrd@1;,Z %£LNɴHm`Y lC[bPēn"tƊrrözǓF-wlk4X܅jeۑtۥŨVa뼉? V1YǵcؕÀ3@&0-7.$vtoڻrNnښ0I" Z:QʸB\!eƝm}kh[_# { '>f/5 i:yMD>VtB1C=:Z1ZrwVD<ƛ6ZLu:R0jr:1뫋'3ތK*2\=;gX˶:SpN'%0 1t߆ݵ77/M-PV%R.fjEJc էQ__]|x ҂; PP"() n/Vg(bA,w?z>  K V>>5|E}f[f eT%pˁ}|;ʛf\բ8?}>/x.Ztx{4I *oFV ȥr"[!w!R`Nu &z$F9'YE:=aH)^ێM,]q.  
VߨL} k]epX'O5k:&2^C+' 6茸RF+E`iY;<>h!fL8A᭱@&?֓lbVcys2!`,y@d(Ġ.W v2ؾRw/@ԇ,JH%*}({x25HVdOfyI" *r()QzN>R{Lgʚ,^Nެt425URβ BFJ[U,i\}w!/GA`IOFZ#C#xKTY:FECRx!krWcDԪ͖~y|sSz6\!9G픛A^/}>I(=&GlÞ3NC:t}ȎW}ȖW:!8NHI![R&)}&VMuN&Vc]MqK1/n5k\KͱjImfݦc ugkҖat ėnE)}!4hF'ܖ!C<1UjJZ(\$wF"ݮ*bZSE\"FvXd,g=Y7c[>,E7%ib^5%jNXaA>ͬ %ZЖYupr\(A}Q U RqZʥW9OD<㥲>uMb/5$y/+P鞔.X;+jJeMuC:UoԳ7ڿk +8KEl=VJ\ӺVc!둦vL`&7#ih"S: lN/R9ѼGDnEnEnEnTzai ZsK=2Lt;j,r'rUiKpc%ɅsUX:;XxKMJ}}}egIƝ˧  ւu_"LL)fiO5HÁf bMRbAIBgĎ(i 8PK/ UA~<2qf+f+Q ݨ{I+4u( <Rݚ ։\GnDeN7SO` |)0(R?dG,Ûf2T8-lqIkkr6/$1WWUIRRJbD`-]:.\rx 0]w9NRajOw}YVe"[W!Z13X Wh2e Ǯ"aI9*Ǔ"vo~܊a~?_~_dώgGLX?MGvw䎌`5~Ӛsw緉9Ҫږ>aU8Bjs Z;LPǘw~-(%2̜=x Qϳo<@~Ek6GG_vq&ڔ"L[.I=Gq(8ƽOEO)%_dic8k]4FPXr(Y@0 1H(Cgpl5Ti^SjiPb'c{ r#BnϜ=nʒ B". Ĵ(-w+eHN]Q=&][ao"5MQ"딕_I~n!Vne}<꛱C9Y@^}%-* g4/QͨҏʍH^;ܬJ5!<+t ddQb%Rb)T71Lhw4~:ZȮs g3ZÄp+ |9F%0)-$Q`A.sEVT[j-?]k3, NR`\t~S'BR,K& F c1-&(`XPn1܁tV[PKyגQЌ)=#Z#)D VRq)ݗ`!UPs}𔢴K ZadHI jdgbP /]cv Az >gZ"b:́*&^/~tyrZJpӒR`MsX^e5|XBlP~OJci4T dYKB2?fQi2>'Ύ $>G{o VBA xl0yRHA@qG@t1&e$PR0$+{bmL/ˇ '> z 3-|Ve7MZba:[PV"ۛn|8CpxD QpxƠ!t@dQ(o8Dl]irc{syuY{u\^?Ɓ[E]Nu[!탔 h?>ߵbȕZIGw "um*apS_*xyŜ!8nWbu./8IdZLvUwMvLH,iGu2띊zs5<8 x- 9sd;U]}\W 1#jn=8]d~Ouc&yx彝Le<ӿnJBOeX-wa1ǭo/ˋC,V&ͬ-?ۧ*gܙ,M4Ŧ⃟ֽBRѣwK tRQǻxVn"[ 9qM))'oz7h-I}Fcn{-&ӛލAxT bL'u[k$^;~=һ尐76 I3-tq”V5⒩]>We|3$216LUNЛONuȠ-j?݄?m>߬tim`副9[{;!W ^hS]Rٗ^zg&)voQA/T&\C54*Zm G?~+߻^fT-VnRMӷ}[.OPGMrL+Paֲ !v8#*g,X3ϯˏ! ?A;4~KZJco$8_rV>NKv:xRӼ`yv۔4LFiu-h1Ah}n|Fz-cpʧ\SX0rMXUsaD1rhr{MQ=ecϹ:>D"fç5{x=ȻQ.<9]Za|ʧ W駇aQ=۪TVO9쥴sL@sMUvcٝ9]-@[1*+CS\JKE(NT.1%muƙg}kNq)p40Q?/mܢ.8%1և8@Dj}+CYbX{r9$º rCVpAb,3," mT r1y%iH J 񫟮kP q ,h@]‘į;)D4Eε]Lq4;jxlDӝ2[詧HFĠ٬j܁bI[ͺ\;f1#_=6uͶ(˺PP!a!NBZ08;mWKHo,o[EI6Y.o-.cȺKm OFpaTq69|P9ej$~p+Pblc +'kRb#8dZYJe:EhspgD!6jQ(o$v.--T-Mz?N9vo.q ,meXyRw} .U=,5 E[H< 8}QI*pli^8 _k3=FA_f{?TIj*t6+LXR dYKB20*-R(-CF InIG J@8F@e{iK_A\blѢż{">fWmF4ez߽p-(]]8^)@Q(@HdzD0 c%[g ) _ 8ҴԡRL8Rj|i&5  3ZbtP̑ZJpӒ>eRJ]l\԰R{,X\tsX fjb SQMOS〻cBO"4sXȉhM5uNMJBR11gn}NRVf[zr&ܦB;Yr3K 2VүBaH?[OH{~?>\M? GaC&ۃ~zs~|us]?Ux(Wg_$<_>y!ˋQ}ĺlvx2\Ü. / ]ʿ p$e(֯2@"gϯRU8!NC3{i-y9NZRYTeDL>vLr^R A bFO[ORYkCg sqsm hrf^R.ڐta4ϝP4XY ,1`Kz2qceSI-՘B%\rژn×z%Cfƍ}K} Ѯ.䴎|n{+ {wAc#fOECVA/?lw9w;>Sdxw4H4@)>尸H6=ZSD10Wse -9 P ~з[|@O6uA B}Iy@d{Zmsp)Zιs|CE<Z֖ +v^@fxf '{fIq8b [牧QI2z6@f o0 DUvO:Hdw'1WO`|^427l8LnDmWb;;@L\GmʈlsGǕbM#S#h& $ DǎVT{McyKJ•$ )Hk+e!4!cg>]=@ a`6n0!8 wa~Ojo_^ԫL|^N|f? ;yC Spǟ;Tu;|~%)QpPCP$#Je2d֘x K*__}>L5}hG8m6MiףyNZ[0kʮ7%.|,)`toy#r] {)TVz6WI7k}ji~yzwo~oĤU3na\^OU,FɊ'o.bnݕ7OW_'conphrg~"Yҵ/R c׏cgD*.4\ZdQq3~&VS8ҕҔVib TD}#W<ᠰ@`PBRhS4sNMM)J ('ac/ƛY$X JR9ओ4ak ִ4$ZvVC||,9nCw{ޜu~|{M_߾ @(=y->Ga}x#<BO|{ᣚon|FW΍냀dc9ݾ%JHŹGt㖛*D. $MɁdFMx1ṶNk=s]_ޛ g o5ߍ𴆐T-.3>AvQkf}=,f ag^ikSrdln\rLurEP`fE՘'^T?2r,D(x7*zGNǩ!&PEeXuYwdW!|gkOn9szVP$u8SzaDa9ӝ8X'6s0ix<D iAFN/ U)HZY"0GQ`B!`:,( + `;RXwW-)1}-SV]7Wەo v^8~&9eF3'i ߏli$#vcOz*NGz7vm%@si0M:D &9||=/Iב)[2o{zyi܏F0#鑉$+22&| Z1:-JK VlC(kOUtN( NPNwRAL&YNR igi:eD3B&~rlb1qv&BG>l35nmͬMgGZlgԾa<ݮ<\zQsd'^AxA::: vH)L%ApCQCzܪFc֦ 0 .O1WG?nO}^E~gule'qO%,Y 3W M *Geߦv9^_C2G0dթn:p.s1hX1B3Xr!޵r3 8v^;O~4o~@t|4?ܛbŰl]#y;?OF"c2zp=m9_G}7?e7%dk6=<|w=>)>m.|)110>Zڻw=̓1ouy%_άs$!j]Ciwy4gज़o۞Q[.rqm DS!wmV مF;].<-l}s"0btκN; Ng¡ZSN SҚytOљbjYX%`̝R??K$SNL> Pln f)SHZ+x&,纐Z%PʃH:h弥ZU0itCc0X0lBΑb|Z $$&yʱMN$8DJ ʝ&RHcR/*@n=vS0$iFMܾ51wQ"Ô{J^Tn Qr0lu^<N0<`C9Î w;ދU'siׁ eBOJV=WV槚U3?C:!;Hy uPkY8h}uqqミ:_ !~q) E4Wё"{gq2ZQ$FCs+K{gR 3m,V|:au}xܒ-o;ǯ{{SG! 
NeǴ%W(D67x A`Z( 63utG}|Xzq-q@}f `0fXg[>9(&7A9m^U1/`DͅU Ae1F+$5FbEfĽ-K.rRosjlrF7wNnt"|RҎm5`h?C!B.HwE\iэh{(K:ϭl`ג5CߎGtRSKtΰ!ٵٕK{mзu+(ᨙJ^S- |H&N '8:ńGx)&^co7Vwx5j;Z7wץJ e}ݎ\#_76c?)__ n\--Y}qpWAS9/^Ǒ>c\ߙE3lHӡ]ֿ ˇ+S2rwL~p8gIxJzVy2`媼SKhL?C5wb Ab#:Ϩθq[V-0R!!_Ȕ< 3X^u{vZ q_ք5!a&َDbt?cTFٯ3am6ӛnln=qޟoќ.we_+}|5*_WޱzWLιuC?e>]_ͽwGnA^ VF +  + t~߯%~]} M3$hls0!!T A[5l"UBdo޾톢w1,g׫0غٍki%?IW[l{O( 2Iխ]i1x-٭-BV[`Ez$^Hȋ>?d 3|fgeF=0:3j#NEqь=Ast!αP)*%G#JrrN̔E3"SΩeVd2-c1"(`oL>~B ci3C:^}xK兏$nb|W$J% a~]Lb-p4<9´"p/oz5s9J ? o(v dƙqn`4>^]g PP!veYN&TnSU`=6D[kKԣx7 º qޫo˫x {=~GGqeV7= GJ}ZZpx$i䗵pSCG\9Lm׼W@>Y.KfxWIyUM_Gp!twmK@m5\)eH_̯#HQRyi$tބM ׾we "onLAGwOkO‘\q}-?/oїޝUr)Bn-P8y7-[ecv_Q)<387o^y2{e*nlT[*@r.p<~DB#"!v3Z# ~<Jo+ǫ;\춠I}Ы:9aM.uO7 .q̲Hs)r^=RPj4h,@ 2*yFZ85Y֢,(ufBT +Tɜ$LhH#bjIƑ6gNj$b%3*U-`hbpIfW˹!=>)?es}H?,S]׿.Op$(›ߑwo.X%n\fDqhF|sT|\J}B?onu ܭa6s{agQGQ/E>x. *$HzL[Ifp\%>̝zp|g\/^]^b4!XNߎ %,lF~c?~̟}S=$}toSjA/v`-@#Tpy'i@ צR,^ \τmʩgUj7$ԳBg+YQpeL@rk=Afo1JYg ֽ#HRaI`)sbM $39Պ+հbrTSJ%!TK 6I3T;< B'.DiCBrMg8T|nDU^J|Fg\g l΋%-ւ|"\(S!_xw2p5]~ѷΰՍ¹cky)ձ>CD4G(xwc+ ('u^ܒY|O0K_h1wVRuq/=G7o.эWWdxGAՄn5%tuw-Άŭɻn`B`H,STޙYEHrrƞ ;-ja1pйh_N ]Bbud{:#p寃_SF(èPư:YYRAQ6A$0yFp$AG jn7`¡CO*7Xؖ6b;9ʍnH|h-bPT9$ ΃³dbVfDn јQs 3FX2i=Rc Mql6^5TX,e|&T<㙰6mq2b C 8t@ TE#`/a~N/\/\EQJ2"yɘD!4Pq~ GtZM_  8ˎesPL3D$b՛RfvWmהiTS" S!1\ ):"ٜhNJϋT/waG-lm彌_nxƠ!|xyR]mi/uƧΔWþuwѩ3%m਄}u|ԙXzL n" [-ɻnRgYKӞDiԌPV.1'vm( F49S!uX\C+s}Cs [bW5[йo$#S-E ( Z(;>(s@CFJ`!,N[jܡF+AxfIYh5JG`d3 S kr!E%u.d!ӤZp1ҰRg!XՌ ,hBP_occ`rn,gL(e9QSpN&/jM‚*Ul?74u[K g\(2 qJ_bsEܔ Du*Iq˂6Y"`=7R,9^W4 akjU?5ipu:L@IAz4dM/O$.9Qpq9i-'ٶDbJ6ln.qY5]AvDW b&Ub2pApr`%`}D)c*E, rKyR*]H%H=ksGreJbNJ$H]]fvfHH @v%gII~tpPmE`Y@m) ^R0EBht>}xv=OF :~o&rDU <3#?NAجcIjwwQdh{.E3\I*,sQQI7AuA&vU"ZBwxĢ,:$xNʧ\4 ,s,'1^pK919^b<+LDr@Ӆ 4]|K,:o"[m <҉}L9b2,RPUfyB9GSeN!nhEZ{$)`!GSXX`"d*\-V$εN6^}FQd6"ռD}M8BaU/٨KxPA~1i ƚ` ў$`hI U&d-8E„Υ|rtpvy3'h(rD@i_1Bԍ6br }:kT#O{Zm{96p/8998\^gYpUƗKx?l6=yQ/GUؓ8<'1G tG~fZ`ߙh@'ӯab;}$Qyi s3ygC.Ļ33ad,~{wv Ltɧ` hۂKDvM |{`һ/ UEsLk_p=*xoVWCXݟxhMW`]z1DkI:xn ŻezJ([(Pɖ)p@wa])Tǩ6Nb`h+n"qzVt"Zї%HsVaR*drxC_Xp'O"Dn?)s{`A]a e)C4*FpDcf_V 8bNk5J.XhPo}5RTNy"ڜr5qjږqgbܙww|-www٧RIeQK 0o,CN[g5NGa;ŭa~x$Ub@0JI ]Z~4FCZ1OSA./3DT+.DU8[z3hn0ADD(qMm#qό"0b'JQ\YJ VQμN] "PT`T2&vSRC+<Ɖ _p VÞX6 p&P=F!  [$M I4G5ԁC*0U%^; S\j(n)m2V75D kXoRkᨻk׿ =N$6VSU_+!#(]{d`2:(gDRcOJQPMX)DbZ(ʽ>sn|(g_ň{KjSc-0>V63AG$̐fb%}1hNX'D&13& rI-%; h^"TQ {#eW kb\~6\\vaߦk'Qu ?wt6MAos-G]0,K 8Fpch02~$VhV&ZЊԠM4(xԀh]d19p):Vyꓼ.//\GDfkKˍ7iQwťkr{fR"s#O`r[ {L(Xb\㪼'zU76}+aO/"rD#!ES%yBHb-^3A$-x<3me#8cq'ۃ8ZGy4GLSQA0 9]9XiC<VJr$S67ܨF`wRVH5 #?~HNt.KU`4KԆ#JҞIܡəD7nNO&M VtjI$5HiIJI3Iؕ0?jSG"ss X"ZUklo?NdqlN߈G7з2Vvldh]hJmhfP2Ahى qpRa)T{~ )kELJ=FJC9Ӫȵ*/J#B_!mI*Q*B)cΟY!J*xD W}n%**wHJ-?8'NNjN,36~+V`9V6cͭ(IǴekEWkOuQ B>>z ͤƙV &7EЄ{@BQbF,-ZbK/;zJǯb\BeQ^cFh՗YV Zx@1.Ƚ5:r}GYіJ %/Ro>#WSZފ1pF޷ Kfn"Qs|.$Z$IwM0ŹD]DYn! +ҞM.)oXwݙ`Vj"h/ʺB1뫻Pu^4#^phFR)}K؈`tRB~MJ_JE8%-t~ׁt#rp$+r/v(K%#Rk-op8Xj 4YAt39Sl) n$ۖ6w-GMMg $UX7a6|IbG+6pe(f&"fr-!!wL ٜ9sE;: F| _q OXev~^=̓M(!AV95yfWjBn~|;JC78> a/oz/ۍO Ey`j|ҶG$ sM9ĨY9:9 Vxp^Psɭ`8`>C+9jy X L"0+ hKc߻I\dsr P7{~ʚ2Җh;Z#Ǯܞi/=v.gB\ sWڀZkzݽ]Fŀ? 
e*xkS&ty_ ЍelEV\|$^zh<[{m4nplD67K ̙_ޚ5?>!*cBMl݉re,N0M)*Rs/b?zY0<1=^M1'Yh 3&|')p_<Ο 3cGi*:].vW#t>O&Qn,@*ao_-.W~^X/.HY%HQwB`&m*MW(E!1f4yrFaj1P ol}jD&|1/wGAXfwOm  -hDט&2I \[sqFi+ָ<-aͦ]fV)xK~~uJ ˛$@G&$ l90*$-NĔ;^RZ-B( fs(ܗiڨݮhqt弃8QA_fS7ImDg"JZδaR\iRF2TBiD1$8M9bZb7uU<'ey<lʹڜD-M Dz_Ggf"s2?<6}M3޿)yƸax4TADU;!2P'_>?gpq D-<18q<$)j0BLyO`dzEb>*{?D~yiJdםnWÓ8\ubǿ4=oG`_ffkهHM;: M1O{seYYb(Eu2߳|ȍ?#~xޜxw^ùقéogb0й48<u~O6p2;G `33?NpP fBeX^.L[ne ,ywٙ}7/ }ӳǽo2{9[` jf%ڿ ~Yg]\5O~ӛWY^ˋYh._Dǿ|_ Kv,mI WHdNNXm!ӢC4gRDq,pS]]]~U|u/{sssˇkɰ7E':y髓_N?j?{-M`\ﵶ~z{VjRxEXA ivF>ht['ی^klJeoۡ7o? =8>|b?ݜZs f?3Q8.xC¸:EvE(M?O>OW/CٿxY'X./YYZYp! Gt ȾMO>FAq`~=}'GB~~arqS:z OzryQ< 3Yx8fxv=|9y[8e)}" o[J\kS!rw5lQ~(y/ռ x>] ~nØPozҎ}vUru7 3oC Nmy>tT||Ԑ4XGiOÂث7.~6}SPmWOGo`6v'Z8}p0\0\_w0fH. v䐫>r|kƈX\JP#%1$i2-ʜ ZiDzFxWZbU颳3%(6v>PA?Zk0s3׈[kKo"\{to/sF ˲C˧5:;+gQYgy; N}yۗYtt i4vGS|ҍXx}PiG9eaz{2AWti:x4p-nJlpgλYxǙ_rb{PFF(y#K)Qs4MZga' Ox@HrJMu=:M{ boz+U^#3/ʘ.L3,ט%2 ogp$Ɣod=XS1**~p۳Ě)ĚyӒI'ɽ CTU2 \o߯@VՊwLϳ䐯97HژM2$aflr2-)ip?,Rɣ5ZiF:no C0?C7Iɪ4@g&[HQk"0Tee4AN d BtJyZH%=Y:㫳PdU~^dXbK D.vfBw֔ZG>dAaqK@*Xsεk5RAP' `tI7 :6X=-reEAay=(AWcnyP~sUTˋ:J A7Gpç0:"dASN.釤+ BLA.<LZgXP< 5Wb|Eht(= \2f+`ڄJ DNQi|DF'P2Q v Y4 <(k+%s:1"8Q4#Ηl"$q5zDkQ4ZvDcd Zc$P5EVQ"DP2F{@$zŶBEXIv(`,ǽ3VfN*VMaiN]yl>)MbVs0 xL:yKFlt ,zę pK-͕BRv ]ijky'鸂Pq&';r? uP발pZ'$sK&e״p/}pqpx=JxzZ]Np6Ҿn*WrF-8 4q@/crh͠_R* eս3[>wNj{N{GG՘/4;n-P[6;fwse-][j#nIQ#sd6RQssef񬅱눧6#30bcpfF963+ b?,\gS¦DdAecPPDk"a  tı̊fX*@oP1|8o)(L  <(TsDm+(AI2O `-D]jSO-v1-tզbD4yؠv(#9'pF88nvnmHBRJW=Cr7ǖLVwUWWUׯwy7妗l.=VRՕP-v0JE8jL; _cDxD[FH``~/z!AW{).(ٖnheOԔ6\I{F-nir1^pWzڏ0{k?Lk/ VXa{i`eenlCY-}R\hKmR}U Uۮ=Xm{!<ۊc$Vzb1l,^1Íʭ\ .aJbc-sﱠy2@#cX5šVK q='{Yi(5)l,>XE)d_ߚ/ K)[P3}kxΗ@ʤٰ[+SMo`VAjHk[lfZl!i9i$&ٚ/cO0vpe",Mžchv4NZ P6`(^:Ad-^rxrJfr`.r=-rd2t-B(-ٶ|pR%_8f KYɷ& ĴjNg@F BZU{o.=3L[JdN9!Lp|d GaZF΃&$!=V#\rbSSUFBM-Ad^ P 92'F,6P@7`1qt4р&d[u ;AˆA(LE Ĵ@L Ĝ>S1iA9ePܨY MlY\-|X(Fg~g -sάFoћiћ*@* β <F(XmmV(jh^vJ̶63{ʙVJJsڡ<6kob._kcSOs%˃[#bv LV"]_΀oJ_F}5Q'UM*{ovWKQZE)Z$QKŘhPEP2BQnnzv@|bq'@'6yplbsE&ԓzCvTbNb-&b1uĂx9-qd4j8E#Z4E#>XcR"lO ˈ/sk>r͖I6RKh!禛_oZ>יDOV0Ds3ʯCwS/ToJh@B &뛛3C]  sІOU ,wǜ 's#ϥ+ΐ3:I4J p/_H g0>p# CDjH:`.Ւ@9gLL8SS`E{KZc$8;{I_Fw~`'c`"`b]( f 9 kaB&*Y2֌"> xk +uKng3I.G/gVԍ?ZelP4uJ{#d<2w˓Xq :vc_3s%٫ 2~uR'}H@ WsX:f3e1 *V$`N9ǐ131iP1-;vS~97; (kGsuG$}q?8ݵ7ð𓃇0ŰYAI+A;xbUl_?E7=S{Q^|J6Z |_;%2&+ɤ*Io2/ȁKAhOW" P/.p d8)2 F&kX)bO.ɾػmFiTPW`waJfe] RLTAgq.rY%!k=HL "+=LBƤ4F 23KR*eIe l.HXcA[Cǰ4I0oWDgF\PX*a]Lnc&xA?\W`̠Hf)1W2SֿLm`[8wCOt.!7\}ZҀ_,~HZ/=;܃gH%/)_!mI`6 |L(ebNVTiN wmu+x+ ify[E\ť*M_uio.wH(/7W {uj^WZ՚_4' 8XID(W:5gEj{* Lc.Ó,5tyq:LYz8Yoy&zMGqG3yb 8V0x*갯ȊoEv|7ëފi/$; FW"wcCs+ bxl'8?~[+0i&87Pd+Tϓlu"Qpw;Kcs^`|R6.|JY|dl>M2U?uVGQr0Y}BIʪwS4`?~u7Qm.L%C\&iNIo9ȵKPX$Ȯ_ 6ә=)dϊaO= H./Wϊ+^ES2{

i=Zp tpӹ w@14Кފ'&I P y,)\&ޒJ3lK;G?a vNHlH`mP9 YZX0U0q߾cVDv 4gUe7F^-We쨲F_[W7oWs6VzVɌcWx%ҖP'4ϴAO`a(0:F Qżtmw ֓h΍j'>']5I` H'p!&|rL(?3a {Ec'EZ\|,&Iw2M8{C_/Q?'7ɕcop$op)M8ӗ~ %3)S$[hEZxgHPYlcc@90gVA]Ml 6Ò[O rыZ&+2J!EnF/Yo=YHs# ۢ'ݜ M~z9T7_V J4׾NWz/g#4B7kqO!MICHRH+9'(8=n$ ?up{.0Ap vig:v]s+Hbz>[* sꎹݟR Pnc$R5~{Ţ aYl6.!.jb9#Gcy8 .9o,ıJ5kuHcI9-c xΆ^A .P#l ub*dmCE_-N0U6..h\,H}k$S-O{8l$<%Hhp8DAx1[_v[Ǭ5-?Jϻ`Z5Azjw&"/-Yje { uXc-v .dK ĖrP#,Bz&Rfi@mU ,c)gTBN#CnjPǠCE @R]9C q!OuD)҂ gWD $jYbu3db$J䑑6!}mEZzT܎لKЪ*˄"7>J~ %>?wUާy9iӝqjEYB/ W\ZUhA= DDpRWu~EFLX>DZa(.m[-3}鉳cQՓ"ln}8<|P6*nؾrp/ĝ\ 2U@ |*awFI$]=ffl/g[U z?;J>R;EcKlkoĝj xv.ny+ށ;-xy/cjMm|vfBӆeϖg$74$'MEκ oNl} ^&Sqz6co14eW9A\ngcyd7 |~ 7Ao%p; ,:sH\5e^{2d%֥LZ΄AZB%Ӝ +\ο|>ؿAL{0XT~|̓: jqZ|4ܿL1;;W4* n.ؽ(Ƌ]\w d3d v):u 넣 <IP$$98k[ 9"g4%>pFS3p)mȝ+,.2eIas3J@)Zoxcj[q-~]"DBsy$y0#4U!q9%Utihdns-jX)8+@MKz[Pfy*#(őZ!Bʦr6%HJx }8G@j"^($6&>%>P%IRPU01 '+Zvv/˩ ^qKZ)<е~Qυ?w;cݷ|$ &car]qJB?$b.'t0SW/ 2wR#٣lj2$p4K k'Ef&qi+F3)R2S:}c88}p.OrE|x0~0'Dµ܉]߁;і 40WQ`s߮}u~||74$YzjJU^%.VR Qs&߷r 5/c]淌Rم,i0ns{g[`4ك7z!9Mvy8dp{%Jp*%\*i3{{qdFr6eX1o4yP QǪsI4arT ypa/-^dW]^7kt`hFQuC9{ $^܇)hq3 B3i\\ KG"$4ΞUẾ^> FfBy[z/a֘Iv92#6[cK{W?4QBDͦ)hJh&KB~jZXcJܪyÆ>7oXlY i~_&L@@3 9d0g%TqqڼM!׃t@1v4)Oe8eH(drTjˑČBGIlh<֒w<(W jvԐI#uu|y ;tv,GѡQ}H޲_/#`%82\WANHfڭaHScUme|m]hT(Ԭ׎ǘoL)_|>wiSjq߿/I$PJV<69"R#m#3[|Ɛ]+i=hQWk^zR%j rP +aY[%36$Va_gde^KJ׮DQ\Ps1^;VC;4y͜#4@k+ X0 x]kH蚆RAyTuθ.`(!ҳԂ]8{EYT VW}C2fz^KB6 y)Ȣ>w6L{yM !=rIGfh&b@@a),Ep%TKȒtKbR*0]XΩZ(/sXK1PU1*evDD8Z9G=J]ʃ4(gv'" Vj !s[.X=\c`Nx} ?]DQ_.+RZ&=~էn@$6T-ݧس#y2gqyf5p{B𗽯^uƽR~XOz1_4ӠxS s()jQ~>c@n('B-ACvySx/16ww v՟‡{NA%;f#4]t0Z2Չ ꭾǞNoݏe*Pd\݀ `?mޭ}&=aq zt_F5nis?[Jr LwJG ]᪰;ࢶƛc5އ-Ը,}7$z j]Ɨث$}KX]&C/!+=~JtN"7^eʌ* "ZbjA":lz>>c=G6`?o\ra.% 1-I7fp׾ȣI$}@"UY4 .wh*ҫtUV#*U[Q5.`Jn)~qK%m/ؘ؃0ج)Ga׆aT0%[ "2/J9rpaN/w_xt ˙G`֮YUҼ*eIR=x]L'OqV9ꙩZ}| Za2ǩKq3!ݳCI'ʘĤ&3 +6^ٖ'{}챡2D؛IyRGWJh˫ agX?CQO]|dP (@rzÛG_S FW nU_!>3l@НU h/Coc`3c 8Zԡ/4=ە}wuP'lpq!~G[!v:u.*LڔJ뢳U9'5IS+ X)NرYh)X<z@J=z4N'E??IDKeHzbf9iIE`jռP-$>'ueqq~\ueqxV?ᇘKr80rAz"3:{T \z_~z{)p~y'! 5^WO:Xc%|.ރ>?b|__ྐྵ\w% 5uj\ChV9an0iuh:M#gRY~j-8f`ѽ(}% MOsX+ OoT}Dh 6jsws M84gi.`kGS8f5#vﳄՈy ?X#0-vg35 mk$0Gx4!k71ex89r6.W}e Y6:_Ӎf͜zZnZ.KYD3#2Sui<ʦI -ieLуD7C‚Lӈٞ ı Bd0a#epեɪKG_їR.z%KGE 5t]}_'G;C)vn]wi7y$IXlo_4+ *,(quYb?A~PD=e6Q\jŦ1ݝ&lb!~)#ூ^ nŨ+o7~ys榌Mꋷ}:ioq$ƉΞolpLu͖lQ/, g%(鳂q쌒>"Q\U+G9kPh]_}*)}Dҁo$GSVCxJI,8# V Lg1og9޽fPEun,͘DO+%zryٌϞjpr)WJ͎)uB;qdDvCG(9U7Ph,#8ڌLX@@gB/d=F0:ѱh[l 7-*h-ijb!̓,y"Yzk=#C&X[nD^pJs|0nߣ}|ǩ^5duL6d4^@nLD2JG5PiisY# ' Y2.Yzi xň]@5 _I9 NWNJ%&IGΐu P3~OV,??<|aO]$"Έk yViFlr3gϷg_>}o1qgw sun//B:G!GQBc&d'?4T=#kU)>zF7Ew S5o7%G$iխHs%BqK>B ծӋIםFu 5ZJ:c]x1BYg"3͍ͭD!5&Kq$-0cAuX95#/Bԯ IWtWYE|I%6&MZ"Srz"1O2jBGWFİǣ]nw] f+J=L ȣ;}ԝ(k4n}uд+U~ Tsw<_WB)l'gKjnϯ\ln+G]mErw~%43'-DIdC- "sUv1s{CE| |u-5 uקWRo{yH@52l,h l^Vn9[gU@ҕX%XN^V@UlZqIpȒXzs@=S#?aj-賒uE6V-w L}EĘ3F . 
\'mВm#;\r.SIhfi,7g!=~l|LnlyN9<#U \.e>Ĕ xSr&R ep6bk/3&|d}}LfNx\(2Rf$$sfz;)m6s4=Ms/lz::_z.JZV+< gX# J - -_&"5jƮkZ1{p ZrFt';RWDwYܴ{7!\-4h_l7,Zu}ecMV>5 IuCO>J" ͙5/xi]SP}wb(`oN/<)*\I>ٰnC x``r<x`zGy9=|Oȫ&md1z!Gk9\z;R5ZÙYv^=W`4uꪍ/N r;\ ]C|95~*{s7luO7oZWu`KY_%xuwd~-Gq[+ͽ?9_gԋW*Lշ k7ܩR kz$S{8p\LWl6<lMK5ӽr5/VrzZL-GMbV:qJUҧAYsL^&6kX&s#S,f֌kfi^ל.I?I`F̽%ڴ:CYSJeOJK.&k3ׁAzI3vFNYs $?fFd'kgc`ڦ3^8F Y&ƁtbA#69tީaf!nLq3  -!{_$ϜInsH\MW¦X0W[(%"#[Έ\ʨ ҾT3ҵ1Bؓa;3?k94-J=.N`$_@R܌ih+xz5AF_5RZ lEa4CqhnfWeS͑hn~t{}+FZ# fX`U~,7[` @PX:VbfNJ+>Wsdž{gyٴ@C4 .3q)vlЧ>&-(7; ~Һ4YKۃ ؃#F ZY1\>tSEFmwK-guV}@2[f/#ކ[@2Oz1sŻ@.Pg[fg=<쁆FL˺.< \\a{U_)7v%嗴 ˀF(@X7z-g_yiXi Rbh;)UgerZ ʑW>`37!FךJ04זMsISR:0z]pFxU$;DH>ntaJ2@C Y2Sv 6e mZ6EXBfSq>#X U:V_YM]r`Z.} u!rqq<͘kV܌h|JҥmyMGo:-q|a~ۓ..rHih\Nҝ77_,i[~:P~DEEkw_'2ApT:MB{ c;:RjS8^i#'(QCF>xS)2םnVPj?5?;,}=c>o蕛k&WK,+Cn[Rͷ]ȡ;vqG?53$M9혡G#dzv+-=nMTY=V6R #f  ݵx0@G= [LWmυHzRlIZ0e^)2]&Sɹhs @6ё;zrUz9Gf xV?! $=~l|Ln+O <_#jx2`st9˔M |d FΝb(sQ%͍LC_|~aqy2] ѣ/8evlT iONJ]4{-ܿMgMOK $@vҥ~掐cmAx ><@inP3>suR,'SXO}:fa9ڍM#z )2˔?oYKMF{bk[䬹w1 D%YX g'H'zc6V%#!ËM> eq@z_b4ˬZSi]:])'K"Di Ikr HfǬdA\RQ-va #qe`hV!e+N\2>kGTmDiro̡ND1l>p6lbcll**ϻ3^Kw786J6h`%~g GrJ9?)jR7'޵>#E/Ws{~lRI͔ XYI%ԃ(R{w*- l$9{P*C3ef8p>S~M>PTxs5]U~] ?"Lܻ8 w??UGoAB)%~lґUyd5\N["N.^-h9FPuPd;Eno[[3@6ozuPZHߣ*UPZ3`7+;!u[OQM䫋u:\K[\)ŧEq2[֠ yBAɛٛ݉^-yϖo0vɝYp-{~x/ޏ?OJ}_OftO♖uVL*{2!$ xfUooE7[ϵZS JTj>o]b^OZmVT … L;Vʥ3s%DvlG01|{x{} !0GֶgNOf/,,'#OQ8mHrh,~ -)%#M,w9O&֒P:%1py \-KJ9"ku7H4tELijSbnD(htjĹ#HBj,f2 OVFEDδ*O 冕jgԢZeUL&xD2Go?ٟI#=+I̫GF؋ $dv~TK:ORo*u{&5HЌXOymLȼ`-Sgk &"T<hCV%I+t֏oxL݆uy]3B̚mzCo1f8iVùj~]K}7LrXL]{u<\7Nvf?ԂVAۅv fKL`'?J3XmdĹ㫍62^ԉCni}&4K)v?:w4Q ܒ[||ֹI@+R1pH}Vm'DTA'G(m;,a-J#F?[ke? kt{6t%ԙ1laRE x7Nvr?r<}2 ? o|fi'YMXR&* &~ |7x* I2#ers&RoOqA`I#$ K_+~Q`AECELqyKBb#:uQE <5VkvkBB֒).;qnk7U[] bD;h3FqҞv:>R5!!\D)f~:~&\kyL]dc쇼jʫ@ Le,Bw}΢K1&TU|Vqd˒%DUqFn/ͥ.)eSsm %TᜠpZlB6d C3wE"/W`$bk8W"1B"R<3KR{sC&]hpitЮ gNpL0Yӌ(dtțT½ ! B=Yz##6nY֠ZU5XyN țO{6w`^0޸̮GzpU|zwɳR3!7apW("\D݄C֘7Z^HpgY2ݎg?o7H$`/~:7@=j 3 ,߬,K)CO!Q; M>K?x.O?vD&Y7uS8o6ی*nQ&K8K*ҽ™7X\q`T+O兵X9-1f((ؖZ ٘c=m=D2Um!E 9SS(^@-<1%tG.(39;_t|i2ь47n>l$SHnv yND ʅŒl Ku8-p?Y2lثG 7N:Ŗ5ւ~rڠRYY!c},c:f޽ޮJ 6^^.y]E? 1I|3Ӣp ݃ {p$kIB5hi"Xf^dZkOSᥴFs,0+C+zoC׎'Iu_o@cw? b> [ٻcj|C>T> kfer?z?RlK+PH- M+)iSGԖ$aTWi{2]*ЬGJI &J !0TZ%Rh -aTƅ[|A\=m{FPyIaXvP2pn(nō={ -TW3P^R"TĞ 0iή|N̾ʎV:#Mf0"Q *L2& n1N`g,06TO"T2GOSQ'4"kܵ\u5Cw0SbO'=5/igUSJ5GLu(_Yj.%LZ4w) ~D+1uJyN R51D,ܒ);w(%3:pG(*r#scޡ\e gyύȘ 8fYE8rx\/TG 1UHQ}m!^}A1u}&%uy\OfYUmkђV]ɚR|\{*PAjKr̪PJױS$80; v* V'!(,p2KydE X{}=$ἫYQ GKhń pdFfwYeQ}5rjhupi)A:5ḇ my&v\v0ـҝ9E-*`0Lo>]'`N z6Z<4O?{NWPi_֖?mLۡg(YS{`(TT"ɹ mN5 }ǣܔUܵ~7x"cx=qՂP^r7SV@NBHIYz#URS[?Ƒ>CR/s&^Xfc#RRbJ i"G Y- ,Yo:^wI8|~u?P4ꦭ'j6ȢT e(˩ЌRTP-:u&zt-9;AR^G-Ƶ*YFY4*XNJ=UֹX J4S)LH$7 X*uPc0Nt҄AU& v]0rGet/j:w( ?K޾y<5,O;/\DD)6$> ||B⻕c cx> .>ăK is<HpZ>[qQf *89xSX1Q.*DY{#(w)gSʤ4UZ"YcH}D)xD6G2/.*^ǤGnkOܤW`.Ap֍yR>S\kOc8F-aLa$| 1e*3)epP$<Ǵ$Hڌ UQH|CeBieN.8BT0ݺL.cJ0K%a,t>'wYEGh #cO82Tt'xw m6'Ap_㢡~g;Zv>䜓HG`5Qj=ʈD*#" +T.i~5th81XN7O.wwls'aQJtN>!sUеS7`9͇`$H Q`yD>ͥ>7Dd6,r?OTTDxWdz$(8t"+fq{su]a1#\}uS0M8gqmeL)qqs rlunL7B! x\ ('^\S OǚiCS8S͸Bg@!!v4Ř"E p4U#`mަz&i4٠F1 q":&qe4:S J'&C$aD KAa Qj%KX4;v25'Ne9'ek0G 8{d I)<𷛄f_j @HuzwJT*ɉ,oLQR EiEj.^. ݵ[8",llF pl}__"!;Ih/)Q|3ع( |X7G؛>e+F[1ZP}_5)Hx1ܠHlA љ9ڨFܬT(=ix*EDުAƿ8 {z)tؐbh@b [1^LW~GLNbD-; |DXy7sZ#R6$_5U*j"V4I6E!=ո+bISdk "aWaKy4VX\`ޱ B' 3G7sJ7BN钸PW|\ڤ1Ӗv6NM80bgsR[l/omK1X-O|l29rqѹ|˷?< )}1b0,F rİm4o[@8oZIH ",B 2 EFfEFn+B|||;y;7z;0WPDtHnlOaU-M,ӪD /xWg~Jhz4vc#g9ز-ſuUd&^MiTYF+ڸ [!D${ݻ50Z.v>W6zG8Dsɀ'*K{5>H8E{,"Jr>5V3*n3r{). 
j"_*xPh -/+$c9[1Z*j͛c2گVqJf,$5$ KìAFYӾq=\?|hضLk-Y{ Jyи5ʆ4jOǼ1b4+8z&]ĭkȕ=;5!N$!U!=I`k3WF 5I¿: lj-c#apzpN IS#L ߣ%2#z =ݻ)Ь7usn; (m%y-: -r$^*0_ZIvԳq'1 #L$BK)S M6;s- |I#*#>}{Nf/=Re@A?Dr`_=.I=\(KR-P$@ˡ$Ⱦ[wϵ&kH9/7|H4M2)8cu ʉ5tHR&SOjVYuߟscflǴ(H@5oǻ|zt2Z]m?p80o?l i} ?w?oD'>-Y} قY`fڇr9f˻^N~xuіGdy]?'cؾC|6&/=z9ۛm>U1u6;<|sGӤð<_?ѻj$}Ut˪wJ7EԻCԻpVp;&_Ϳz *Nvi>wʞoŃg: yʤ}{>B`fA蘳 {SH,JmXX#J& #l=b BC` ȕ5*2X,.dt>ibx׷/mKbDq<Ģ 9?}p3m1 " %h<~>ṖX+'Ĝ$rỉ⥐BuvofzcF]ypɐ Lj⑬ ,/ XSN1dzj5fWBhV@GR^ʥ+v47tR)3$q|} zO%qSQQ7PpeX'B ! T8-VZ) ʙFLdjqwר߯(R>#mt6*΋h?xw>TOET@}wq-P@L!uH3۪C͠SG";hUV .R({Te5d0R>ܛu-VHt%8jSɾ=5w׹>xstV%ZZl)֡XlH֕*6k0$80G,Tp¾m:ɯ3W>#<c}˟ui10vzke~g'RyA!2M߹\(;CAK&K zih̀?b!,b!\[nnS*N{f93E^CˌpYDžWY|5_ozXv/0WFAOBfY>cS[h3.8M-ϒi||Lee l, +DA Aa{Y @@nMnYoxQ9Nj\Ε*Dɚ z %+,\a[* 9feYZ,#ՙ:4neaBsyRϗ5g󺬷o4u\`MPr(oAoaTUPy"`BB`f,z`9g$dLr^d<ټ-rҹGp 4ŝtS6UȌLN<3U,cUIhQN?ɛ5 PRMzNaSlCrN\%td^k<@(xmA(8A u5Drdc72v;@$3Dex4 njq 4!Gx:&$|MU 9Цd$ Ț+ISklk%5fwDD-=Dk7Д)nF@)G]NOPA>IRП;s޺譲"(!! izf;cո-eQE] )=יT6!F5ř=sF8{II;Ltsig ,2N}i!܇F[3VB9"hZgLEf'bV|+Gr22H vG)zlfM쑖ߖIXw|㙾'%QM_K5I |̜ L>̯)[~{w Ԕk.9+M~IqudytR7Rmh((^z_EBZ92eW׋g-Yuy̫H%?<еU_ u֯/\ XȿK3Ue8AV83SFm6sd0L<hgL[Zv<"W"(o2e3O֓Di p4̇,Ң4LsiQŰKBYԙ&cif4ibɄf4Y[5M &"w}Wmo [MQBunn,ˆMLFfܦV]pr,MrueFdXܖV]S5rڌ` |!JlhȸQdT7 VlK_@-=%/`Pscn)8P^BBms{ꢒbjIno*%}gDw?׶>NqQ0|1Ȳ2@='ʺ@㎓6xFcRxny Y! 9+w;k\g粖gS@Cw3l6n^SZl -_a%7zw_.&]w$4(^H+HO<^|ə?G+n2@nJܗ@n.Ev{¶k*n%TC}L C֘H~Q1qRެuɁg9ٮ2fvoZg);{'avIDk0*. 'v!A--Kfn/I9n'\<@ pQ{;BA> ;pzn\D-AT=E)wg p)ݤZ)9Vg}КX) ;!*c?v =Axn¤" :0c6O*րOgZ"̗+!-w ~Jb^IN29$mnv`2⯊ ULSe L:Մ µknVJѮO857FHz9*Gƕ4ʃU0{JhA&E9%Uog'PZUP+)℘YU*yQ[$In [YT28!q:Qƃ!'+fE jU:ù/ψz:iw^t߅5Y<]j6 Fo4oxgDO;{o-̛I8# @@c/Bänv3㞇JiIu_GgJ0 ?[vo6nU˫O D 3r20U1+%%VK4hH4WZpfE(3:(턓v*abǐL̄k6m@XE`40$ ђ&o|SBRTfpP #z;N$Oz5qOvI+]Q$7Qu2+}2!{EhY37W$ݛ]o ChoZ)u-7o9;drC*M:u3''0JOhW?nd`V&Z{('3R[k-W6J+(DMdhgEd+_1 WSZrrXS#IXYfWZV3J2$,*U^z+AF/g>'B~aPn%yR2GY#9}hhg#3Z@\Jˑ,@J14hU0s|ve)q P;21d&$UXOk+gFA8 /fiw]CjKH[Ŷ{lcIָōďmʂ{e(@}g3V"~RϿ۶㙰 J0;~੔\ ZY+R!!DyTT80ٱ\ 0DLO2Q=ДQ*#)5l "{7cwӰm&$L}n Fhޡk[ [*ǧvA䊘ޥk6jtKK=Pw:ecヌ*B3N=9I9ӊZ1yl1 r=y|iVI>1Ŭ*, IT3t J+T-#xT^ʨpvEJW:< K :'SQ@0^ػ Y,^ !5lPy}o}UQe9͈Ԡ De8$]jlz JKػFaX>N-`*:ƬJ IV'oF%2 kηj /3ū_~,2 8gy-Cou}ȳŁwAޠ|eC9+T`u?OǾПlVf8K>GU@K3d^pO3d1pK Y\34ߏ7`n+צ—0_Xhg=7Ogn:}u}q:[t` fng٭_Yy᧩ _~]%%IfjΒ̼ȍdf >;"H>ȯg~2q9*)clGn,qIr<=Cgqq=ca Ih0R}A /?ưyw3/!Mx6Z;5ܳi`mŧ L6pzt|\jŋNqۦDĔ a' Gv Yhש+/L@^x)s) 1m20zB:Az8*SU"Tf#r^'D_ry?b9߃ZB2}@kYRQ?e^I\DdJȳɻMnN;hSY `ڭGvBB"Z$SʴJS%@ە?oFV=v|}^+kH`7GZS)㔞C Q 1 (D9JrYJ<*z*GڤV4D04ݱ>ա=nZ 3B@Ӆ BVOm'rP-%N!-BN r e\Z·e:LYo O5YxYHrǙ J)y(z kAu'F@*Ti Z[m1VZ1t#C eQ"TXԤ~c:җm[k?9 |Ӑ]gÓp:2b']Xw?URAgy6+sĜF3%4|S\ 艬=oq _RsG2Z}N|n8nלqu?/Y9#{!;ƚ'/SD19:1q{Cw?g~A,w~03_diXt p QvhRO#6\bM1׬>M0\{J;j\OFGpjdyUATXyM@ФYE!=LJ0g|å η ^gkM5uC 4NEH/4ꢴ<3iB4N3)IIQy9vTG2'TI^rc?0}aKD&}bB:ͩXc?M'"Iq\xrpHxd+.'C-$.fGb\[Ny?X|Z,jr1混ŏWYף=\_,pۏ0.DOui4pƘ!e=>bb%FrBDOc|=]rM`4\ēt %v;Qe98e6Jg|ukaۙvgXl"1J]׶es*e\yB*5T11}L8\mbS:B%lL~^gubk QB. 
Sy -3.@MXobC-z5[ּ;; ag{9"m<,!Xf)Y@o5K9Wx)!=/6RC*ڻ2^^/._vmtq3GpSΎv 1)x [ <Xrs!m:XF1\Nl{'у3p?6(#\t[ts=usoB`x,59G {h)K~M&݇[ݛL΍g ǫ79ZE4d퉄u#q f& t V{DsZA{;ٮSrzXs)Jc-^U1f]oHEGw?ƻ %,X;OyM" I7&E9uF6&hi%ʼn([Tg&Ae$ȼXd$A$&\; UTLJ"T_vқ6} j"3q< 7K{ZOUOiZq&_rJVP+;37)uq]׏>ojbqv3rƹVtRc)D6v;`IIkv=\UjJ:RPTw].b>*|;`K^>VF-injHep.,(ip6ۉzGS+EU5gz7_]P6E2Y=Hٜ@ 2vly&bVSMK/b%[Y.5pq(VT7(тDHU5(C*?"@u/|f~]hSZ+܁dj`L16Ykj* YA2 VB[Eym%{Kp0l)_pVW& 86`M&kߑsMs]Ϙ:pNCinV*Y ]gOLJͱ&l쟹z:/’÷/Hhoo>?\uT^?<>%Tfi_ 3_Mn3gBySe~v58;+>puGhCoҰ*O Ŕ+bΖ%57hRj^S*+q NOpIS" Xt:)&+yxxgEw=Yfu 7A ,@[='&PJr%v؜P m!:>SVGׇ,LqdtbYcOa&Rj;`Yf4(ːt &+EAۑ0N4Tfh2C0yu}P;˳2 *$y^,/Q%rJ+vBMBrGg9gU^z QMϴi>U;Q"*rJKynKE?<2i!Qr^Гtw%7TEyAraY QeW?d5F䌣$EoS#__;A:ݒ\pXmLgu` e*ՅkU&HSb Qarr "@Sn1]WXwSzݝK.#DD1hkMcw5v+(y&Ԛ ~N#Wf=/!ȕ^e\2z:4W׋-:_/}uܭ;.]߮ gX^=:d3#ʤ0,yciic Qn1G *7Qv+j0WT&Wuit6ܡ]~Jmdnı%e^ڶ;`[N`s|Zw jb nb":0P^E>zҲҋbQwSЛqMG* P܁``m*M;(9%;cV ϿvqXHBUS 3o(8YPʁs ݭ7GJ]Ź%c_/%AEq~aW  +y)@!"#^8)z`#NZ%k`QAV^yvkinuR5PTTʌhAP^?y zϓ|wd4|]]u> w·rMC]WL6(Ro9\@1`0W*=F‚ˍq)7) lNq I֗F¡릒j/X4C%QW ."ȡ7b|]У]}Bڛ*%l}yDQ3S6i=m۲1e4T$Z1@*A) U;Lw+5ct@{zh)4J5?|Ʒ4YImڢe4lyRe{꤀J,K`a0FAӚ@:S˜_۔ߒTGNoI^5ъFF}7AkɌz-?%ږ!MuP̂lYU+4\B$'%(.GBjJ< 8n8ChNMsB;#g\U;k@63"G1(M/:i`hPZjd>lg1# ldT P2\EiHAj _FDiAHÔd4p9ګ(M-9.D Ɉ$yƃE jt[qIw#L#&±E*>}\qhixƱ# a"@JgԆH\˸Be_>}mO0bFD{҅1ܡ^B2( ? $@; %gڮ &+2o 撅̀% n׀ W`s`C&H:lX6uuE -ei4C&s;xs}@WgQ΢^E:ժ|8KC`atDB`AOB@1vƓ:GׇP UC~SZQj2 ͥj/Us#|Ђq3!ZU+A+5%^Z+Ezk\AZLJO`>%(M >)lJjUaI^_c\A: ]'nd>^J%Y  + מqI%2)0tVeȵe˻ B(.Ri%hΜ"9ޒ9LB + ĕ}Ft`0]^o`CC@@rb7@_(cdidbh`^09qR)tQ&iAJ'pcAoj!9b:;SudÜ_ DrȥjݖLwmI_!rR~-p7Xll8/~ZȤ!U5|=!94 DfjUt4 $[nXrͫ>,fʊYsVzk~/X<Ť)U3Q~H`dPB1'\w>.U|"!c5?ݐƫC`b uyߚU-M#;Ѿ?)?f+V0J5jƔ|MuֳA}0GTY=O"lsvRקI> O04XIG1|1%X+`.‚prl}6$4kr)&GFwgv 䜕R6m<"|@Yv<;yVKExA*C x)my J{^V961_!\dD(?5/a-8X)|$ȘZxL ⰲyvsY-3OpaiSJ.33$q؞ˌ\4Ay 0S)h <$nZmDGĠo1~`Ia4+)3+~zϋ qQIac.{TB5%hyJ'.+]^`D0 .e\'&'3۝O'K?To>՛#n&{;r:}M7Ddڭd0~$at}uQ`]Pkftgfd|wu?_o оn9=7_3")mgW_'D# zaXE<JRMqŗZ0\C#$TKeka4S3.Sz9՛OBXO@ q2s"IuT7~5UaҴFTm*XQ^`\R[T͙S,ADLiy2dbʙ>8s4sFexl]M u6.[2ypPťk Fo sETXi %JUլs?pqpZy'PQ ,޴Vۘu Se0%'AJRdbQL-9PΙ6`nԆ٭(De(޺IE(\(2UA4ЧvN8x,o֫C?J& sVg v*p5)O<ھjA^ >J&S0T5D踵ժ/wx2Ey*.V95 xipעBPqNzQ1:J"Ci;QI' 4pt6}.jUGf<\&[yDBy1O{})qj`<|E$:":Č")9̦L[ѝt>QnY/m裸]45yp  Q ShO0LVwTPøqKH %<(Gcg366p9 #+%(I؁/U'MFr91H86TXhM @5~rb(J4@evw}1^,xϠ̑"2;iacBDӉ|f.^B!Ϡ8X??dw>Er%}|K_3ҔA7e*Tuc/5xm*'~URh<םw)(_j!t=*ͽ2OEsf0gI'76]y+({֧? IWԞQ;邏n ")_w4#CfOEs4VyI8l2C)Jk΂n*fNkVϳMy$ǐ3Q:/n3JNSM7?P*M,A7M哞>݀ԌvX\3kOsqOe7e1ZUxѮzx[$ i,}8}[:"hbg4!wo+DZp`_dj qΞCp3Qm0RsFgU؜ww_U"6m@=`'߷fa'pYR{X[r3ωsz% 6ZnEv  ^\EjA=%=d׹: ~?ӈrbsυwf|}+38A i\8Z^uP:4x"Jh$iWS&k2RbuyD+vZ1~ ד-h01"%Z^"k .$8LyfR#&{dvcuxcsqn?#ɾBBe}*pG+p*%:5G5JHqCWaz)ݱ';UR#!9X HUe''^8~8`*HHǧ?l&ۄupn8[өXcXt XT($@I25Lӫ>UmD2LHOY!Y䪣POȸ 0FlV&+7Q8?c3߇OaAu݂@5+WQAҫN4գ"~NM:_XX)idWZJr(e EFuN:ADy 6FL "qB1L$#TVI .GY8G\ +Ai3&AKZJV(;iTmA Nѿ+hsJ0 p,So rAAUd*T´oS:JW*V. 
ZKHPYNN?YtJ2C!R g ʰbfy0~cepkz̿?*q䚏[{"Rx6:!B'(\sPTkZ6 ZCkŵgg2+:UN - L G.^ f7OSBLmŋ jtD.ǚpEJ馌)nP S*޺tH(uE߁$@KߎKF&"pO&} 73E}gXmq }Įӊ޿XۊdK-|}oq YPg_Px?8͜G/!i*ZIK=!t?Tʲ|hݴg0K Z>{WO؇Rо^IKr>cFyzts?GÍ Һi:tVvCT7uN)L{bcD9kҐ Wu ub[-` n.4LQYKhDZIkbn} |o8%|@3i_}6ë?*y_#y/U{3P)QLϷW*NX^r}:C1!(0Mw[JJeX5bڀscO<kkI&$m` wXC\+0iMa(!δ_2+f~)+3;7kT \x235̍@[S|{xګ -C{OGpЌ7S㧅o;[ ]Z/xvxٺ.nk?rw%,6OJۀ"JHטbkRJ1.`Iꣁ˂đ`1\Ym*T1g!2mfP]4mQuQ9_*ƐJ7ӌeRͺ &P35\Saex#{j%ZޞL],{ YڟwG)5W'?o;nNgQvEM8"f0օF<'!7 q96@t)&I-W ɁDbRM1z\'\:咙6@v~ 3 2ì11PFAK"#::-5T1c5x*s9˅Fz+A)FQ)'_"G~ N bO/A1[u[փ``N 8QX=˨FA#Pڑa*jE"JZ !.cI8Gh!e9k:Pv1/(E ( DAh,8VQ0!Eᢳ(/TS+˔ϋ&o "g[50aA#"9+:P')%{0:DHd,'Aܴw>&㙕 C5&a,x0¨K.&` {?Mw}kZk7x/ `dLZ;Y (DtI DNqw.(Y;ĿWуqj*^wVVՅ970?ZIfe.]l.US ԑ}U P H5be C ZaxTTPdtl1' Ywe=rH42b#xͰ<MW{hgS=c{cػ C6(H-ľ0aqކN8j@pDTwaR Rr4Ⱦ!QW )P՗A~eP(y8'R&6̟dIM4-s9\K.t7L*0VȮmA#C9XSL}w/;ɾ( ;b^}Fa`%o|a6]Vm"Xsv$0hg;PyN.;B `R8wiU λMi].+DjIdq@ I`EMi#Mp!I\OQ* >td shXv ZZ ڡgV2-KE$]8 )Gbu.UJvKpb]T|Iǁ?=>oҼ?/"fxs>Ay9vR'mݻO=T+bפ]/z糷ѻ{w>woKq8pxGf]P˯$60,X)k=HmyttܶG[$m@v$[i#b(rJˮ1JKVOv2z*krjeމߏÇ7eat ԺH\׻CaA#Ep?*UYdzq%0L0.ٶ"x?LH<9)~C{OOD>8'H*:/R'aGp<Ouj{u7:ZL)&f]N@K(/GBltT&kw|0)p^RiZ6|觥c* A_O(׿J[+?y@m'm=8* J?OW;_>D7R7x?>[@Ojz*:aD7`};0U00CWzn 0H(aP yjPYOPG(@Q'jih epPZ(F$ %Esđn H(XHPq탖3qJjq82"շ?r(-8VqPO\erRquTGN+{d-dc'Do"A("5,1e16)j 8, h2R,{S_|rǔ?8}=~Xfr7nM%m3|hܭg67_^dboI`[.)/av{UKLeO2AxAō5PCYuaڨ TˢId7,7-]&v=i(O/Bz7ЯASNꎫO_}^|8WI`˭ }E=gDHqE@(w)|R,_}*gvv,$ѾN_Fƿ;vsGNs `%\},Z_N~J,g;hW%}M¯ױ\ꏗk?״#]WpjmŕgmjKFL0%4c!DSQ*DAPDǂbC"T쪟n+JI/EfHT^aIXJk!lR(lr*)%Zk5f"D#(g$9j ,+cB: +0Y2_W6Z(iz*4q 0A1ȺDE_΢[ഔ:%#RA0qi/([+lٷl2_W6(4"y˽K;6 pZ'8OԕI@+$ CUwA(-3P!4J 0Iܞ7lk<;CH[/S5X/Q¤eі Ax}MRJ &hVvۛ2)#d#db % /GhCA5nًyd̻ 8&mZm Z8N{.ƾ)݄6zڶB8DTOuS7˛6 ׂeը2^2~86zj.ng涉ŵ_\ǿh4V2nj.xO9Y ZFuW;KBg*Ơ{<>_U ?4[ҽ)|A_̴]T_yABCqACäMu*DG޼}ZAPW⼆ ƥďˁw%|,gEcU Nӟ޷ܮ&\`9/]jXgxBDx8M^/  сDKFs1jC !B]*,J]٭yc-r4'\˸4_paJ: ੸؝EB‡e6zwwӢ-l*l=x?)% T`uYDiX_?]EĕE\؝EFc:RaI.:Zg "=(|-X f\#78N8y}_rQ9_`ܧFVmV5F:B3K:/ a|+<08xrJJ&lѲIYYe|y/C{]\&"xIna9?`?aFʣEu7q*Ԛ<Ƒ:I k#ヨ$@.A!mXˁɎu$TW$1ITWX`&' 4jq yAIwF8EF]f_jrSԀC2bζ"}V.h5D 4x*eLBFy,3s\=x o&н;(1:Cwd}R:I}Gfm_IHpL*$^"(\⟷++(Îe,!XhMQMܻ2$UL;7N8|E*;١I&3v!xg)xgy 1;1 qcry%WkN"+o q-ţNPرڏwVv*0cbB D1Њ|%,9g8˝\Rʓ?R)B6pi`V2 j$F"8`BCXk?Tص y f"{_)#*WxAG焆@g+&e:Z->BeuQQO{ i\WZH9NF+e+,ș)AaF|D-J $&I[ (a9hY#e`Xa\ jA% C1 q61 ˸9D#jp2&2YI5ܢp` `V@ᅍc`*P&,rp97RlZV6C+ I29%#1bX+@ Пg> 6& &"Lv]Rr3r `>#d4eҳ^{IZ+Иgq_p 06$x&E,RQ촉 r޻+"ApI %gaJ4"qDE,v#t|Y=HY24;V,׼G-F!ݯHSE%/׋=LUnK1^.໾9CbG?._ j}N7D$B0vp}ﮏ}vxCN>^nťTkJHogV2^&5d#}0_)~\Tt޲A1`)@R<ix&\@, ` 4a2fx h#FR1٨hiYӗ}.OpVB$&VΰHp h. 1Hj9%`pP̛0кuj e:"|a8^\ð6LB܍6 K. 
j6J0^ftv"u 8FDz*A A >ymö@oSpg {x9ʹx`[G5Ďs>Eeܩzn;-nq d-NE)#I$˝K*ҴIRiYl1wVs9S w0",=@( 0/q RZ6ܢ%RK^U2k-||8n٘7Ja+g;&j,C ] k/~+' O"HD-\3:)՗Z0t1)QɂƪjnS:sD%( Q7Fz"V]Tf/p Aˬ1#՘ᾆo5;翞߹==YmۇUMf) j}4:gz, >(J]gӱ1BƵ u80C1Q)U6ڳmvkO" ɉ*q2(51BXb"ðA&JD" ~ᬗ/M3pm-;VU.lLyR2({&+6|,U.pY47Ƹ%Om{]IwgKTr{ jy䇾F`ǻ ξ σ9fi0 ܩCtY AÒY-|>˅.Q G;k?}|}2O%"{_W#GC}ڋu}?nrB88Y,7|/2pn{%p wp8tA>EƧ|!|4_.]N(,8VYOfl |k0 N˗qE'/f,I9ka9G%xddZgg2[rsE\W3-v 62zõgze$b4j_전2ȗ]D 4(\hƖ O,.b)G@uZ@{qY,-մ߯8hi6Y9ӑըq?xcWAN$g6saEifŖcda323.zg?OpdNO_~47d:LWc>)$,2hL`3W:^R_]gLf{ٕf-x`?8f~M1}SoRB7w|g>v&t4/N.,ק[Zx<]c\n 0=|\7RMwe?Bn~O>w)̋iS@f;<ԙ2] R`<'3WFqi1.6b .6s4S,Kh"CF$ 5r( |Jbl@37{;1\]vn^pUA*wNd661NcOxO8 PxGp@R{pdn+IÖ9,hA)JxSzT`I'(PB,=هM_7wc&>{yveE"f+짅BFtٍN4Xc-h`$  FُO͛" nF ~B4D^g{ͅ~p G|W `7[ruR c[Kd ,⣌se|~ ]e@ޛsB?}/ | 7L=1aq!R2; %b*aQuiLY$Q2 6yAI0HK&fxBXD-(xSOUu IMT(Cf8 &Qkp[X>hReR({C,}fV")O[Kx=-S)ȋ^!@-_:3y=:*we.@l&n\Ǎ'pCu@ nĮtZ޶㊎S, $&d~RT$uX6"q' i*!uBQh:L*8'DoUbn59]Ƴ{=w}3]Q(I˕զG}ahVeبeѹo'@ EV'!c锢H&'$!@,12N8E,4rbԀ4hƛtE&X8jPcF#eTB"@2ߑLYĔs 97AXѩ|$ck[,b.ib3l Nki#a[!0' ¤x>7 hS{X+V҆,bEbiHL ̶Ha I*Ӗ&}:рz>f\Jert%ZE E<$e&gE*tE_)}F͗A `ɿOI)*TFc%!A*+Ѫ✏jrզz"J3vX„ LQH?5_Q[[ȶeM&0?bjpɁHtG砏&3IGChF1Q}U T,՝aoTC)zVip:ZsޝK|V n>j1 IѩS(WR]y ;n:JsZ2iBEhnQ-9;g 631X労~12BpɳJ9fWw=*B1 0,t|eT"1Eɗyir"\g ~X| g0͉X>?'{lWj] }s#ЈpXG?._d0I҉7p0tփ8ft`4Hgn!N'&tEuȹƲq)ݦvae V aX&$a:Ba! Hi9e <#Q(Dx:bE/1gKIL8j*R+etbb,g?1'aItͰ& I"8#L'YI IOzs_VXmr}MAy _-Ai$c</܍`շ:v9FB6QBcB8RƑjUdDQ ǝqlKI B,G3a8~k0}5 S S17 o[JGKx8iC|j) Vd81pU~%%f0LkknGESq*Sgjl6d}I$@Gsd˥d)[DJxe_(}FhtO0cd?&_Pqd16Gjr6/]g#׭}G&ЕkL,l~D(ek/ݦ+7]^8_L]b>yn`R:2Tə;;QLWЌ*zNI1S ( YT=K^낒w<^|-ceLȉBA~po/|/Ҕ֐Wm sdgl1 C7I!ppnχݨl1v1mqU<;,\dx=pæeb ҭ3\m| xLf(%syܒ`/04LbK֛K,xs dbD*W(W%r26BD5yI}i ˎe7kB~Hz] mo";*3XH{f>= ұUXPI.Zwl7(& ЈMTqLI hLPQX B1v&ec\/TԞ$K,*ӡmƽcaA0jO )R#PX"Q!aR`]Gx~4M61k]_|Z(yAAGQsл(Z?9KVt,0@c0\!Ƅ%$FD8(Vh O,B†z, x6=]dhb|ʄǑԔ6IXY'Il)PQZ[P)J"q:2I$GFśWO;E[)LF!Xd 4ˈÜ-W TFJbH"#$_d^F: O`9#Cxѷ~~[U0TzԪ+OP*rꨏ\Wv\}*A l}YWF^$؏\P_i5a qdP,X)Nih#ʄ*xL$a0Ix! /H i$h > *<:Cel8IAJD&@ACL6PSJ5 ZAp[a% ͥTmIel 2{hӂdsj}G{bSݠzH@[41*9۠YRtBj #DߠYE oQB%eoT;6Lbf$kǩ us!(1Q tʘVZ$D"m"'xr8CeV x7ٿu7b( &smlUP5/FC+w,jV)P$#BjkEi߼yHVeE}k"WŚl- (3(1O2e-nV<94ygZ`H#tIdH%!8JR&PyyMV޸kXNI'Ha! tڣ7߳Heq5aj?0')B0ژ8,AHH 'HMd[VV`~cx zkfĞ,CDz;,ePbX?إ8p[h7 v#)Le)V6ڣGjPZuhQPK\JR5mJՕBn|EţJiنވxb}Y$ci t!,( P by"hY !yd Y;TQ:m^a wNfSRy\W]ܽī.K{/B7nydY Yź㜕Z _s{5ifZVfsң{{y~ltp@Ӆ'oyV(gHI;}<(W&|/ J),z3ϘI4 $36uTx1r"j #``pcs-+H|qq|' S<56p]V=V.S9 B[ GC?*$!y>#x[t~iX8s@%ȉ)a+Oٺo`O%j$٪"n5^l Ɯ(ٲRI-Z.pKw_Z--Pu7?;@ӍdR3vKS2ki_6ک_ ^p=[u!7L%}iCʥ6.pI&vg/+ew3;@tjSXosHXD! 
E28< q3>fƙvmk&i/Ql{ˉ?ZEvfxHK^=z%'w{_'"4krr=tx;ܵbkr\ $wi3 ބ9J\qtGXTZ⾷%M%X"v|R*`xK`JD;:-W&PDf@eH%xnl*Ǎ oz#iBpv,A"߭&T,|S sՄrvȞfRܬ?(֕#>פ|Jim0)abHh,I&F%J $%I"\6PXm% S1_TXؾ9+JIڸ6}9\>6jvvqkBup@ϿY0k'"jlS{.Lx[vY-]2VQ!KsW1"@ G)77^AꡤX_\ n\5}`Y][Zַ9F73mCjIsךڮ{^>nU-ϫ ZZtꤸ}~Ap/-0ur-CPk ]چ d=42rtd4r E [4t~2ʫ4,SQb<[L`?.;ÂFCR 1'Wit\)?_\ (`8?xhr%`8,BI0Bqh9x0lC d8Sê*ˆ/eХ h:.WUf.sn1 21:a?kTB \In8ʇGy> NGiP 齱5.g09O~.k>f SPM0QH·&22+ |&dREkԻq{okzPblx, yx`cZm~qyoUTNݪ ȼo}vLh/4AkodGm55'UT;Q-[&Q=Dh ͒?w3^=@܃{@ :nJ(&CarYJq~ e=W}< Pq7|۟>R!ewN'>GXw`$f|xqƚY[2y"Oor5/FgJP kבd- 5|{J>A 4^4@RրRsteݻ{tӄ1&=r{v˴ )WXcRHOLK%L#©>9pӰ;X'ET>qhQ%^2DkL9`IbV"ٹ`vKWsR> m=92tj9ǴXR{s[{wT L 5¾$]Xie>xabȖi B\wq>u* c cIU [d_T$] Yv"@DvL3toQqGD'*x7>,;4k>Ą ' L[_ mFRL}KCP}n_V/v0h x M`3:Oo h4ͿA0#ku:´ڷ0l½ 'y[=Yӂ+ydlGsE|S˜9DQbcB-5VQ 1&DIQ81%1KR9r f1etsɓ}tmEI*pZyqð % 4+-5 b&m-1cQ cq seƈsE9U:\NvąȮ2MmK[W!v;i\ķNU˧f?ߧ>O!crsYRNW-4 V >br[_]oq?@*?q8x X [./ܿ VB,[FqnT!DXΕ;2le9ͼY:z_m,ØL_|p8_D x..x]._}2Y{-/yr*InJԤKO}Iq$5%MI[C+Th9#0ˆsA48md[nZ%k|}7"BW>uț[0Z/6va;u?YK_3'L`A /Ϝз~z9s~v6=?;?9ܫ(aJ\&(R I i4j&r@[Y++RNQo8=!Iߴ4f0bB\筕K)xڹCdl);, 5N ?fſ;V^\>[?Tԟ}>ҥ|:vE.<܂1'dB~Y5چ΃̞>;QTkKXK|׵nױVnNԮ`uT":^"jb‚lY%wPOv:J9N𑟳x 5q)$F#.E v"0$m&H?QVݱgH ]fXk&T4I_mQNw O M/>'J| ҃ANF~5g :{[t<|q>g^x)sE5xmb z]>WF-Qu:B>8%U%S[7u4$|ވ*=gλN;G'tgsO:%e=AɶNQ-b4jk0 =g-Sv)l/M9ڗhMs ?lk bpu+sƸή\1xiLd|S+gf2fB'0[􈻾)ܛ}3idP9E??i9e]bcSKD % !k#7@_#") CKBeM"K:=v;yi=a`X"a$H[]}Ԙ.?fJORۻ1u4f P@}#^q+9c`|z$̕Yth@A{ѧ1 ͞w0|Ѓ!Z76`,Q1Ւr5ViS *\҂z>kTҸMD~QYy/T(}>Tr[h?LBW6<6⿾U"|WE7 AJȠT.ژ2+=>EˆtiaۀWVXҍ0%ە|~62yΞe:^)|NJ"b0`"ip!*~O-K" 9 >jFiO(SIZ&!)LQH@ZRhV h/JS"żmNj[ѴьQ-Xk}!G# {٩y -zd%~Gެ0xNFPtm}d]O;0`!dZHucY-oیd-=8*$rC6;z,$x86Nu/ HR4gEΪn++1/VXٱLyF}Y{/+{.k܎ki~?JfҸJQGo?|McSZ J-NTE+s5۫ÿDֈ}m,dXiavu+w1V<c 0U@RB/DΚU?(5RLn4]b>-/<;+y? ; mkv+ۢ(H*V5*Φ21t6h/Vm eƦ;7)i`yIފ=SZiv:@ am`ݳzs^Owvp ;hOvLݛ;g̸b]'mZ98$J]ao?m 6 '(kI:F-SԘޒƌHQKO)h=mIIR{QWEݯ_5սD Z i= @m>z6ࢩR:JjE|I>\Kyvc%mLמIZ (gݿ÷ԁWw*%_!_j1v I獰&Ywy"8-|mHSb(@@˟hҋ[ͤnL?{ahNÕl FpOml_Ơr-e)!h]T䉧oP$c!F868$- m>Rيy$Av$|Qd֐lАd"PS`5#M T${f9+VdPĬ< 6VgЈ F]jx k[נyV24@}TTŻN"G >T5f1ssr2GzPJl~UJ"f%B{ !D%KZA)}6fjgőwNOnŕd-Rk&dyj/vZߩ*^OSKTVOyI1??G^/x]Pze\^8oFO~tyMۇ ScYg.W.~d0 E6N/Ow)bT>^д^oAk^sٮYa<\)BHM ,[kG|Üsby7/f/Y4_ JS#cLH+%6/GKorg@A (!7pش)kȵ#4'=hvݶ[uߩ~':ǀ٫OaƦ>qnDߎG k(x 6 IK}ObR{dV,lGB2욅C,3s=oq*sԉ9oq^@kΞ޻]O9$jږ3Jry4=Qb4wqZ!fvՕ$K)$zCrmNn:Ӳȱ4Zo,L`znFv-z{ /D)8v hr[נE\9:ҡO:B8?ۅ<]ɁPGoY X9 {}Z$YdX#A`НPE 0VnJ~˷(LBx Zo?<7'ʿ,&Ov̱{9||*2vUdUSƚgX^*E$l2^:9h ƱQ&̖6.c3}_ԫLߗ/ɇn)z 7A݂. mlL/b+kC~5`B[x 唝 VY+iRt6KB!9xuvNxW/jH2S1|M.7ŵ6,]9Q6&IZP@3spƺ^žXQ RzIK25koDO$jjd}N k(%a`fdK)MIRr}M)TV_#!>L5ԖkU8 ,j X*v=Sۨ9{g@w'ĊsAzxhyYkf5J6dz1SK=ʊwie.cp_ e);1FfKAx7hZyc, h]^];sML֥]uFa>Ѻe֬V5$Zn{,;AˋC/ʺYYՌM#K~V_-i=53=< }wEK:YY+A7G?/6f#67}h4{Xӧ_XKzS~Di>3E'3{`1{sh1BPuzH;8;>Ld'~ׅ4ށƺKlJT)My*8UdNo[:=w'cR']8ҹe`#/6;CA;<Oun٥S͡k]ɑGƖdc9nDrvs5YMKnz`(N h sEPImTQeXNY?^\\?\L՗%xfL^K'rNTgPXzW @(یi-j;6 OJ׶14˪PCܬAS@kĞ3Knt6]WS!ka V{.э>#X2t$j^tL/DJ玵 C6I:E%J敁m eAyf?z[ܱ`N[$BK7];5oCh[+7w62[h"VWd GS`{ E!~Bh Q#O~2Nny_[ߍ3x4L RI[Z tkdzBbHry$=!TJ$؞H<vchKcoa-Rr D̖?6b)A)a)b=D(Ƀb\:8>RVF$ E}IL6)$21{d5f! 
}rj1zKz=۞i%&r1ziįamZ_iu咰UrBN5n X>ꛪ<=vZ8FgpysYZ:-25kY Ә3B MY0;^&t^ANf^*MdIBANyˠd &j"4"%2"Kb)Di'aꍽ)D-y;3,uș%L A,rae@ Ec3i, jF?^ Gf&kDB,]=A4&- (uwtWpc_hQ MYKwP{D/+kގP "[8TذmK>†L4Jh܁ dqѼr:7O_{ܫBZA("C@GS[T( 迚|@`RF 5;)mѶtު>PYiO**EUoUB; vXA[AURcI$E\L@R:^FLK\Ki0"P"҉ऽbi}wX|DИM  Vz;5¤>8!FZ!kq7ȝ ,Ufo8T@0[ոi# |qŦ`T_/7C"fc 3og!m/O_8yhqcܣwmmH5CU~٬gaJ5/rjk"KZQr35}tDR8W6T,8F78 cDZVq2Π2tJ6׷F1 vZ&oKtyW;qĐV˭ʥ"KYbr)zx`8~E(y\&gOZ/8JEo'-rª].0sjB H\/ֽZOIV`цfj`,XF%Ԡ2_iX ]It8b_i]Zw8 U5 ]lh Z4rzTK5>cNuD_Ŵfz:ڔC3#Х㋒2/haavkrZ%f!,5jh+{Su3#ֽiL;K%4y㙹P!-c.BКJ1"qHN$b xI#n]h#5Yc2Em~UT8 z_ OCܞkT{Q5Q]]D7퐨2u*1{T V\yk51W%%"^fgqZL~g߲Kf<_FZ>V?ݽ`3 V?Y_<a} zjgArA.+kxӊ5yw{مCa@1DbvǛl*9$/m%%(5 ")#@8y9ZQTdFl9Zo.=7dVz}O!z_ls>lsZz;^8ѦCj :dǥ3\HEDfJ}Cfb/S\xX C{9{7mU'/t@ b?DWޡj0$__vf Н1|8޾8n@}}e{%5$)P;}6nLXG Ek.!1 wR3$͉Yjda!Ta #"Z!n׋UP`}I LL, TF]٘fb.4gs\ )|{1tXoj].tą:6V/:9-axv 0%w]da)?dnQvQ,w^+)< 2,Vɝ*KL\%M‡RD f9stǐ8`Hs5\]YۋꀽY,.sۋN OY 1e hK- BRBGp3 Y̤ *2mEP:i|Ь! pw+"d|ŗUfY~:\͎Fs!uW tN&P|\230! w|q-LPlerjPgtUT1v̤o𱂱Zb]}e'ݯgʜ׀q~W ƏWa23N|gm~LJLJ0(|b>S7hrw~\^>Q]EtqTi~m8gx1?VKeꕑ>ռp A5h悠qD_0qtȣV2Jc+akJ3S.R$%xH(=$n)&RCCyqHl=t_ji&(]Ѽ+ս#=W[cM)bb3̽Ї,'=$Z ^B005f"-\Ȉ9Ph{0h6E":)p,Z`TQ!-S*x@b=ȝ*M┥p'yC{+(~@e=*tT%˹N_vmΆۜ3Ӈn>{KphlwoqsQ+!ed\QOi{9%lV{bPB$mjH5\(P:FPڜTi-KF_ V|p̱8J)'\8w'ü]A[~G.r:tWY[R">Jx?y aB<_e.@"HP~t,!Xlhb82IBcD~ q$_Qy0 ύGF̷Z_'Ыy-%/iZ3Ҧ)|^|.w$T(O+3,vfBUdxj!_=ruKfzqhU9jcfqT 1ֹyP1U=9 i I*~>ZhEQ`ΡX)b/Zq61R;N(gބ7#?G]9Ujݕ O@pPDY?Z)[F>ZiJkK2mr 1RA7<\}੕^Yn}0A@0"$Ff&(G0pzHԨNlź=wN`[Nd D .]hOm)*Uͳr^X萓KB/ C%6S,e<.wEIN9yH .UAB:ԁ:( Q/S g!a@<xF?FRS(5M8Z+`(7#οAHO Ʉ1(5ev)Q<ҤQPx"0>q*c5IYᕠn3NB* 0Y'-Ĕٷ%1?ʒVw!!s X?f7V,߽Zl]~UkUSlU7k"˥c~6|y dաB>%AyI?qL.+\6xQ[Rw/uKb^TxL#$g @Z8[vuwr4ˈ׋yek3>ic<)M۔f,. zor{ٻ޶r$W4f%@^6q/;3 6/i:زW,oQ;EAK4YX"YU{/9rdz# 3KS<$LC3TI`)8"h|˞`6'[5W>BB0GBZ"aQ  @AH7TZvjPÇS }i-`9%ANmH0,DiwrSg9˵QxE,  2ޒuAtB$N(<'Fro6g+Bs><9YM$BOR~qà6LA x \w) Gʂ` jӁ8WF($ IKa$\`yAid!j*}:̌K҇ ޵ET%x~XS|57׍yqK%TR[/d׷ln~ Zk jl&bQD8rQD8"C4+qzƭ'1ʭsyR;ݵվ=kH+A i#wF˧9H;m]Q"JW߄I ( tģ1XYmޒh&DXTЂY TK7oUb[w6K=Ұrc9z/wβǁw1i>f9@MQWS}J5!3SO-RJ˅w)!}M*8 ȝyE끿+L~ǶoPpix{ZYW?_${sF`K..)PR/?iOgZ~'eUi!onU9T@&cL_h9˰ڟ"C ?ޏFWئf"E*[A-*Dư~ >SB1<{ {d.N|0={;;SLpz:[ ho䀤Ux}z*8~58-${eˮ|ojZvo߮бLWe&Ѱ U}ȭ"i2XX +Ros#0y% :cPT0=䝕a G#mDY+j7X$.JL#ZTQjD6L*7\ yX`,a*UzKDD o+(556?+P÷qt\0AAFBg_;c]7NO &~v}JV u1H+h?<>;Ɯ4שs&Wio'刱'.|߳KwgM92$3F.iseȌc0-rڐ|"ZC*ߚvSGnu1cTnǻM<3V{ڭ ELڍQ GtQEL՞hvkBBq%SvA GtQEDeWڭDs[h-Rf<SΘfYD^  ͐6xrM}^sRU8y*qOS&?+=W)PWiFOuk`OeU~3OjU \@èƂRԤMyN 8~/?B4& z3I z<)'I_g#O6xf7 ,.K K ztvݍo1 enP6p _u1THYKZVJ-oyn,!M-o&!n+id{}@ud CxZH%B " 'z ;W#,k{~G[=#/]X+U_Hwy v7}7 Q9S7s-? B-  { ޤ3I\F-`HKUy[{cx2P'H*ڟl}2 W&R&ߺRՖ捽$H%'n+p,:BCmmw)7!\.֓Υ 47,L*Bd.A皁dڒdt-8+=jJ~OGK KFy NӶMHT>`􋮹":EhSRoeVY@`Ge{c4)'!Y^ =-2^-43Y~B2)/QEdsR | )mp13@Ow6f;x4E(ٗ q֌jxI M* >\xR풣 Yk:b,oa}dBy'h0۵^A O?-DP~t6] 袼-zG"X$"68O $bDI#Z6M+֯џ)q)`Pw׃E0Cl%4c~ aW+A3Ŗ1d!sKf^u0i>nwfO*opT|}gޜz0WWbWIfRܽZ}vE6;cwluemKgh1P)B˰(|N)H;#c#6X8Ip*IhaZW\(A:Ue`IO"NH%"*;r )⭑CK%[G41[i "[!%B,R3a4i؂ ƮWh!X +D@1I {%AmE=E`{aG:e3J[^c.z-4Ӵ-WdO#H ݦͮJͮI%Q:ՠz*9F :rB&B (S7̙^O!I\ NڠGY2)dzZ飯usg(U_Le*])yUL\d zNA3Oj3`sF֘6lhma ^am Yz%,WLf/54h V 9$VQOb4HASs0F(m(Y,ŨuEPCHQN&RKje0AƬ먆}V7UBPU#+tadYDc*I;V<ͧp7̓_>s}}3.FiެtY8b|T/&8e(JO\Ë -qڤM6y d7R3K;ZAVv|˧u9^?k@h]IF^`'Yur`Z[ h]\hU*seq]: o2o› Uz9 ' ?'騵Z  :YDr'mJߒ+Ѿt5@fVZuJCh^!89-crڛC!` ~m6ZPy? 
?7gf.ϼKhmd /Fm6r{hYRF|][YҬsyO4<0dߘ7oi;gb#0 ޟc&-v\zs6==4&h#jQ&Y 5UacSv4o0BL?Gk pTUp$ N0¡IפQil"8/ CLd?O/E:XHrΉK$9aH"Fj~XJGr'΅|ω1W\H2MCEAwRL꣓^9s{$TNO0_j>%pzwkTaH @] "#kNH%Ip$}F̝IsP<) 崥keyfAo: "_tkNniu`ۨ#J@@XWȀH0diB6xbQ5H#mw)xkf"ddCE )Yma:cb #T*?Q!dt FɺP="s龇&h̷t]6QDlN'TjNf.MZ"I|0eB)U)-+㸩S OfDFˆS@JÖLdN^ 16"_1D+`A(*C2:CI,cq (፶"Pf+P}F5޿܎ayEQڌUWwwYr΁IO' fifdz> |;ߎSR *}tȦg1 2,`OLlOWqZk܎DQPQ yUXcA}\Rͻ{RӌAW3O8)%;Ī>I5 :W[%;N׫VHR6Lsz_ oi՗>^HԶLG}&|!Qa-!Fg$j(jĦYX[\CLY:ط @{Jo-o5t嘦Ռ}s3ծ3ct9E5G ̯- 5@Bƅ Flϗ՚HZQ-bU (ԢUS5 J[oz?! {xF)Yǖba}4 `g5~~,T ]PXTB L L>&&;{IsZHѥy2dgbYα,v)6%ZlJE]f[0rUfCP)F?YVzZY!L( 0y1z 9% ׬ Aœ^-Vah( G`wzTǎgUbTZIúcZ;aU8}[a0cwmm$~98/U}A, aG, $俧zHK!)xiE{Imd9Zg͏Ȁ 0WڥR$ȣeuYJsR8J-'ԨЇImy4q85~5;k8ØWGh4$eOJx]mH/ٌZ/I"پDw/ҁ9_1NE"Wxe7B |C/Q=IWmߛ}P׫%FԚFˡ&6k-Jͬ P9R̻\Bo+*,%C fǓ&jkǻxzF_,Q?]BjMFx A=eh*`p+1w{|dWU$^ /i˻f4^RVc.^WlXӿZ*$,Ief]XXyHBs%SOtlj7Y Z8vK6񴶋[hvBBs.S1.)l3,y/0]?O珳X`1}=~erE+5< WJzZ}q0~|(&Vik]i_qzqz[߳+ݞFqKpk&!J=킯Wv;Mx@6<< J38<@v3y`x.pyP[)u^N'2ɋX"a mQrHgGV7HmP:6LBפ;ˤQ>aT׫?Ϝ>=Jx˰}q?-OUZBBC?/^%:9_Aю>0G=>:s H̽#(n`k}ڞ.u1aE;4.,X/Y1Gf*60S/q(d̎rj3ө>67 m*!$҉ex\JCoL,(]i&cKtY*Xx{c2ŽH"fIUi&$vD8?:Pfpa 1e;3 +) K q0A%붸gJ^[Mz5w"UH1 {/TՄ\h-HNz1}SKj4* v(S䌮UOևktԝɍO dlI6:ZQ[ 8i'[ qpƷ4r7ΙUDaﻞ?8w98GqJ}vsՇV7e4.hMczSg̨cAwG*2M ҎJV/Z5Ĥ/x88=^x:q(R[o kĪ\z]&/J6Jt-ϭp #/Y.ccV, ;j2dbF9hD4 !Kki L ZX}^8]Hŋh˰6=PRBɝ-+BCPRB\ (Jc,lȍ7VjCBꂨ#Hrt>*4ŬI ܗ„`KUpCaQklWZ󒤡6.ý% SB esqWj]lZJrjRZWX}%kQ26dKG;I#or DRX*m;H1/eDH:'4Ir x-$I.!LuGm$(1{z^x R$Ѹddx*JxEsG e#{RT]@ xKd[ⰼF(3KH$=K:1v[ *zb6}䠅)kܬ гfh[o 6 PdeDz>fwGvKuV3v*zn3\ W&o9SF-s#gd+BV%u=ZVAYNizmzIo84gTڏ3 5| K.B g.mЙp ᬒP)#ubz9p>$6P%\1ꇤIH)&lP[VkuRy͒RsWm7EJ\JIRn"QZ3ysҴ\ KdiRZ֠/ig.B%9@i<ΒXH$|)aWhB,j4QqP'}B}DmX:}QLRdL^'I>w,Gb뇌-jSK2}ݧ 03ȝA(n2 ,.a?|zZ|t Nvly5f07ʞ8'zsZ! Sc]R5 -|z Svfi @җuҒ3fO0pǁ) -9pmF؄1ȹ'!OTBֺQ<_×ůW ,b!_o(2_**S, 0U"Lx Mapq_'e2lR\d@,I2ANW眈M͹񤺍- LA ֊hlĸ,4& ,Ĕ|7&W\祇` P#]Gڢ߇Fh#괠r{fVe N̩<-3sz̨l=IXHѤeT[т, E6-3g؃OІ1 ҴyEB^/ ,iO#nwז0MEwW?h)͉w7-Kcx w SX2HLIġ&r (Ņ$5Pp@`#zLO̲LecC-4uW/4}:üdt*u [rC]e^۵x*z#֯ WnC3.,j\ޝ $!߹)NNojzh[_Nwn"O%L>KnCHw.dvv Etv;.X`nsFhClL N:u~v[kis>@ dc.*痭"1JV ki"s1\=dwiʵA{ȱ /A2t_8/> 9qXe-[UtY_TP=!ٙWuwu}O!9cq0cwVTaD]A#]EA (20W0 |̅($\<7]<(*~`[Icw9r2QPLrp䴓 Z)ΥFB=r L(3*#WkԨN+A5 씒7t²9=dR WV_~\(}E0w3=Q@פAIؿ-hC{H2}I.\5Xa qrD$jy)xCaGS)U\x *>H̗a8n7ۆTk@D|&Al;sE*:ETFԴ#*KNMCa!?8-Pk&+#򲡤DQ,璸;v Hy$lhL "d*p^W!£DgsGYPmnS: 5,Ԧu (* 2\@׈P8.FjkC~F.yN1ڔz9_1xNlmRۤhJQvjcN)[) QcNuW+=i+J^ ~v*}m1wG/}^귏niz3rev|}$U&V༬3FKUlh|b XxgY}]ԜrQ˹ծkd6iÁj %aL!ó _uBM!F R7’;z;P^!adPT@-W46U > ^6G4E$kD8Ṕ -msSU JOJM\v߮SM^ĭZ?Wvb]ݛʧ?o':\|I݇~R|v,xƖ2aq`k{ۚo5@\ Lr!f|wvVܜ3:J-*)}SpJٝ *i]S; RI~2 :V4|QʂgH;΃ɝ?G +) CS_ PiE'̙e:bY"h׮}5V=vӻ:Kl S0e6Z4֪RB{`PQ5:J'BCM0ҫ[h ^$Iet4VjM"JPTrLP7h|qLo-M.WNؓՑƒnFR5Լ]B01˩PI5* c4Gb4b\\$f)ϲ);YPM,ϲ{X)hRSٝ3zT=Q i TpivNyO)HJAWˠ}h8zHS[:pi7D  2y`7a&hX\.~fd`Ҷ:yj |_i"y"l^PD :gO3c}n(j:^l}VhBJRb ^K p5x,-`:c  aG0xr1[*}UaЂjk{ڵ.3-ZZv.>s"o-B6p(+wX\Hv|:\r9ɵDŽaGӒE|;ޝjD'6V{KJg-n5@ P)iGޮSZ= 2(A>e5p}F #ǰnCl }~Hɣ*>m+봕݆MF1,䅛 "8 qtkWF2(1ݦ[d9Xt~0a!/DlΖ(=we[faLvsP<,f[6,Ĩvh\j,,@Ԩ42%} E'3O5`fblnuў-j~n.֌@co+@}tVwdwz@0߰LyhF&]!nsiQZ@81l55QI8QLlP^-ۦV ); TBh˱eq9)gړUYqEj鼕{Mp4g5kV_/+uUO {^ѾgBm-y]hKlkk%ִ>*d;!2H4ZGlmyek ʡjɓo0 ΁}E@fVc}z|uVXйڡ 'WDJ&a9W ZD^F' Ԛ ͉yx8!T?ud&_ ٟ< %r^_|!ovXH&΍ [o[tK郻|=(Xj'@fO8,{y(q٫c TM5*6uu=OCoFA On{d)QQ7@)(xU/AZ&N&dR"?|T!Z%h%fց6t4W7g_nGKhpeƩ}>`Ig5O҉s=ƍSȃ"k[ZEQr_?ug;Dh_SlaދA9۪f4/*i˦.lڦy}JòF'IIY|e9QN60 Wf|PlW6~lv?"_X] //ޟٚ :3ւvj/uH.jmyFU:1u-_+QuZ:Xۡ&_c Ic݁HT(FE9C"Ɂ7$Dgh<RcX=N%*yQ0w`@38B 3r\ƈŨ΅vڌ P2G4*ȃ{hP%M$k|}\POX2`#5s.$(^/)2\H@BZ +Ԇ $q/Rd9oᶣێ9Bv~cO;& [ȃ<*;'wXuny>}6z? 
gŠچxSV;Kݠ8Pisb-SE$]+ݓo"Zl5-#hCQ@(cX 7!6E3f<6T%3}tWi 4km6pdS-c8G7PonS"JO➣ m y&fSЦ2vvjlm8%o؈ΰksרVW3tbiרe`͋iVo"Vây@XEpS)Љw ]QPwiA܊W'\顀fbZZkղ⵿RN,lPd NmXB%c8xbn h@ٝ1'kON;e6>qä^YF?}%_zc;G"v|C{Q0]Ys#7+ ?xvϓ':pvk['}%(ARϖVY.OS>=i2d=9|]ϛzf//#C`iSeArC&K~B]/Zq[-$qk53S˫Պ4_B4nM lp~jCO&)@}tv}ާzS`ُzS'(‡}\RDj Kj^?ҊZ Y9b ^Borȱ ڞ%$:J]$q~5uUϕ$qvGLnhe @JXQ#ZTР@iMw(E,}G/Do˥NybE@Lq64͞x8cG&Y@~9TVX3X+f%S&NA)̢",@ !e!*&!(66H axml)YyEAM HEI`"Ҙ&>Eb 1"tZ.+Zz;>onHv_:~JLuȄFJo$ͮNrj$CFPSKFL&z[yPZS<| %$yD7ַe@BTP%p+2{=՚ڕ7t:hNg4 :̶:OsBYںaw 3WLz=@@]pal5.,p` [Rs\КڭTնuj[gxۯ௳ѣq9}ӨNG"às{b`KZxD-5;AwQO- &H3۰=NN+@tGz[/w#6zigݦʖyh8ݦj+-zX&_k]+VQ-DEFxD;!{fTR8P8bk홇؊ky>D+SDSnSn3hLSK,T)""@ `041hJ% GQED2"fH]TՖy:}{Q:k4ϼi}V4S$+VENZ1wnAa:LN,`r{ .' )H4Iơ@L6-MGSH5w֤[tA {wiQ;2j=֢EA'7N6%~$ݺʠtwݾ+TѼ[gFK['7.6wCZءw*6m&C{Όz>,On 6%'*Ideud%OXQ@pV *o+/k3ц<R^3Ðٶ~W9as>9. z }w~**M,JAK,fOM@By"hL#1AU^B4,ZfKBМ: (ф[8rV R.yDSMn )f9ڴ>ꠅuz.]1Vy)̈ClXJS(n{O@iP1fH Va ..p A m )t̮U%qyuE'1\EaV*}% Ig3Y3_98|87vyO 'Md^MW]Z.?ݩoC׺1t3CE",VC1%OXꋡ?qdP%gX&1mD:p NER!0?-D$  pΎH!A~tS]ZУ|;!Z95x,t>8 2?  EXmYw`Co3 2Ʊ ېOufSN}ng伓 rV$N>ܤN _OOVt\>ө6ޚOw:zcb`O%{LeOv!gX?K~=݁5 soہֺ /_h+d֯^_$e[vF@$5Pva.a-RAj]Z:^P-jG+=D+J)K'tgM:Z!Z)ҜFBkMfl~Mj+FP1Ǚ$ ;L;iH!p)&cK;CҾKt6kPwAL*w%=וˉˇʅ H"'k_A[srt`8O{B@V#)5 2uP˭,-)wȑׅ9ICmr`ri_>-m{ 5U^woSേ\e_z2ӕ0?_#?EW!ԽSBK+#vJ~e(Fo]FP5=+vW;?JW2F:|1֞_=\m?vziV:G= *rsLu7w-%POG6ANA+ VH$-iA&`1aE'5A9"LJу]( U!g* 3L-I&DSKZbyF',9ZcMn}ݦg>6^̪ӏo'A0K+s< fɕ">7 ՘{¿ v7S2\>=;xu9E/"@-͖:+*.= g/X@}exOeʐ*3_*w⻨Kמ%*:V ~}zs+Qu gK){NMgvv*ѣr}U)>pm7`1,"H]]\lHJ@66Vh TV XRNKQv:9 #[H_)'𙥭3fE\WM06fT~)85 zTOo*$v1:tR+,el0'%*Xg9:k|pBOI\YeLV>Cw%'-O͇rȥ(s.;M0OwnT6?}  E3ziYӴS3շJ\zy**Ued==ws0i\v&?ۿJ ɥ'Pk#"mvpLNz.ҳ2c˞7U&I_(p T CĻ.4pF\ߋh^89Z#2AFoH)H`v1awV&ղ|CR2+{a>kRЭTZ/Lrs,^⡭%Zy؞t׼OTxx`K7ŗ?}CpK**YSLVO^<1Jr@R ))HYtgԞ*)]]=_U-RЊw@†*8ڠUWiZO^kApL%ה#䵏FX"@QMw| ׅꉔD( |M5XCuyB1T #&E![F X5㠁Wͷf^K[L5nlUgaI E"[oArϥcMrsx6sQ8 !rKz$zo@NRDF_SsmHP ?|7'qITI"lg7]H&%edR2ɶ$yf =0 `CүOR{_GYj)1i}Zxd m羱+{yKކL{Z~lsa*Ee@W"w) gTOb9l!|bφ`! I.__y)e9V iCPL] tD@I-tgO#-EደHZ;m0۞9U`]R؍4_"5Q&,>E[جs }5Gh%b. neGdeyG:=z]G'75%k-Eb5xޛ7x]c+,_esZl0i FxmZNd[ጲ>*A; a`T\;p\jJwlA* W : Cn\ P)鋫xfbyּm9<ܜ\|9[|I-7ȿ=}ܾ8o~%P448LJy!ÌjJJ7DXi+Bp/|zϚTKhm ˬ@^X)2+Q ;ZA[iYFwxƑ"!fķ"Ӯp,n|`wrF-#{RϫQb{.S,zmǴRaҬQlhi[sqRҴv+I6rVzV 2J.BNJAYi-1KOJ1q!j q)&VRv^OJhXtH'aޔZ@l'miWX_i=IXiVZs\zVV>>WBrVzHMcnJOJ!JA 9$3źnB-d;{mz`כR?oqN[iN4)mESфyf{)`,)Z2F9t>T)ꕸ5Bc {"-t!mo2$=pPCĆb5dLBxFϒdQ-F'<18lzA}>AF%l`6gܘ: Cۧ3S{UhoS9Sb\Ӽ;negQ2Zi'yyNgZ*DN6N=}koR_oIugU><je8ˍï {o|c/k^O3oijcAT~PK;zz|.50a퀮QY莪/O^oPWДBfj~; 5bmϋ;L?Np7?z߱ݗϞoz&;8'v>}aϐa>5,`$뻃uqC\MCs|M#]p 2_OHٔ&DJVTds[ 5e Md%A%8TVd )K;ӧ*/JEƵ6MڊD21QƖP ;E'PGixWk\>'4hd?fS;>5C%i\b%s3$m:b*!w2'n6 ͙`Fn گ y’0ڌA?=@f7EoqXfK7[U]]5Jyh3[t>;tEw$|”:MP{H2D2d1=مb#u쯎Dח g?{Բcݟ@6k}׉PLԑbu2rF`=aW}j2tJ!6EZnVji?nꕺ燽b w>ss|2w-KlxǛe@r9Пq~ZO~f/ooէ/#pIۛSI=!&sN*>;=HLVp'aޔWWi،gYR46'I(%U3!DJ{N9:B%yeb)*e9% ެOss\66I5֍u=*PpV J24sXjm-*f I+#}kie*Z+6t)睞#Jvq;B %]ӫKeupURN\P}z`M}NgJ8uܦ~8 kSԷ rrCt~9"ާrBZ9GyHc<x2^{])=M|ƐFvUGؗ'Ϗn{<)?$7x~UyXMBe('fB!mU< a!iRO+w|ZS6Kd칵|.V=n?-H$SrtH5τDFզ!r9ۺd slBH=|ʏp 5{̱12P[O!;unV9oo3w QJ>OdIZ ِ]oһDTqdYTap*g, B9u+Q7w6ohH6'~xo@j@p;-OB͋ KCo-Ͼ+b9kg/A_%j~. 8dqZ$6 {܌D;s!^8y_.^$[Ru% \ڼjb<4?ޔ右=-zYYw#S4O>5`~+I-bHNJ潗+=gd / N[~K [I.k}squ\ja]]ZPL0h)Z0Y߼X|qoOߴ8gLv{/zMK2pDJŜ=\ߍJ-VtBèTk$JɸWc( s[A)(*M*<YBa#4TX%|. OPjz,K27+&dH |X^Ɲ\ӊPE?;ާ5ĀzFSջaF+ +H0bvAs֨H(ch2ģIY T8QnUL9t.)Ñ7drC*m_1N%'.脜(h48ͬ86rzRA7.Y8iaf!W,[=qWh>2wmX^Inl}j+? 
N1~[Q |O|7{{SEia]Rv2ԉ Za\LzxM_k̔GJ6\gO18*{#W^f<:OZTl5@ʓY%Lw}.KIcL+&7S FL8ŝЀЌOtQ !k s^:F (&~Á9`&t!:ivzvS}OG>%&.U{OW'?ևcʟW!&&ALeLl _7Lz*)eT J)J]ʇH]2mڀz4f1GEދFy>rNqY(.Ɗ*XvZ# D.hֵѨJ`t|acʱ!}8&2E  O8ג,Q4Jܲzg4R [i#X0 RƋRc ŮX ~.ITcRF\O7hv2rU@SwrY*QM"Qbb- mABؔcwiQ&^M_T7[)\h0#싧E*q)˧WiK*^ItRRռX\t7w X˯flYlVYUzc=<=]F/fO.ǫ?݄JWhO}~tϋaX<\]^rAs~+MF_~qv>\]T7bQ>\?Q"B`r)#5c%9z?u;˿^ hNC*;#X$ϜzXg~&4H'hqW➞o+d> yDR̃׍ OeP޲\zヨ"HЖU$cX ǹ0k Uw-V0-G& `0c3(::bp,X-SxeA_̜*'e32q]HI]v̻wcL#tߗR'FAgǃOGUkio"?V:rj #ų&,j}U<490yӽP͗v'0Qlh3-N}HWJB%'fZ^e  .I^jj\n(Pɥ-T0jP#8͹sZ6㤩~ž mGtwJ6vѡ*u%s@ +M_`QgR0UrNBYV(=5F ̥bƢYAr!YzsmN e@8ԚQ`[:j'DSb.SF6jlRT7`zFy Jp,myjڻ]0Yx~Jōmq[a[me\f][zĈ3(bbv SV\p"2gz5{\VrpCWʟՃ߹˙XUߜm.V׳E;CENedr,.(UD: "XXXY"s ƂB B 2ɋB5I*` %7ʯ]OW1R꽠'갔)5(T- ǧ4/RCMQњKXhz*W=dR\jMvppls=5J@T9iQCԶixٴ( QdO Cva)8 ZOCP˕!(QN^s4v^kT7f0@uVD:V?ywNl5Kц颗ڤ4imqhճD_FHU/cxߣ@fN %Vn|fLTd*{FkGzpTՓݬԮjkz}y_~$wC/~Ofz=UK^,Nh;vT4^jSbh1{NZk"x}eבdIªzi%ɫ7KY38鏴|]nWlEmTh# %efK%EehC=">wCR"i֛Ek͓G/?]e7^# ! U]z?gMs&R[iH9}ߞ)O T{۽(ĉی$( jw{/)tʳ -FQL0~(ȅh~j*)QeWy$Wg0c?![KP(E! aa` Ia J,,,Ma 2:Usu/0G Iz8–ԓt^D8IK->:",,~ZfѾ~1H"-OKVUv02\ͽ_qc;ZdNbTJOE] U<)*PѾ^Ⱦu o:Dj=hJLu*.WzHؒ!MQFWH QE ?)+] T'#\tڋf髯#~7Ad-f>ik M6uڛnт%rt,4' 2 M~]+ahGx?arXn(Fֻ Zb8l'qgZ^JKAK$Jn1? pu#6k_j7;Z LQpO>kjۚB'6qGM!aMϟ~i h3X޲DebU(jFߎjrQI (M;^9vXtbQ!)^;_8r5ϸO,s$'j ? TS- "(5-ٻ:Iѡ ɝZF"A94?h}A L5\PRIT o\kF][=HD_5TZExC4JHv#{\zXS&E$O .6ɻUd jNr/aWYkݍC_·˻a]o-1~7w͢|2 jS|{kr+K}ja*+3R[z3rdෙl}gB/9q' $n=O \J*o/nvv{Xojuwuy鱜BrEWf2w'x'<[Pp?n]Vܖ_޵W2'eu_*EjSw 8STKoHhZw7+Lz$kp>2 ) CPĉ\JH gL|ќ6U#UT*:kpR _^oђ2-TCO/Tޭ}`\?u]ԬO)mo SלUJxNq&Hha=oďS>:c(z#//9ҽG5m)!TP*ODŝ4:$5( ?JpԠ`"!jMX x?5zbDNRN -kliZd^jnGFMIx1šaa/i>tnZ*1AR'F-wbB"U\ ̄t I7D AU4Z~G\Uꦖ'NRQ_ jUɽz#ƔezH:ʁ4Se ޶k/Iո MfMB5\p;a8tp"V` J74 :"3N/e&Ƽ!31eZ}Fi┩~$h@bmL4y`v(x𲔜Zʄɹ;i5A{ܟZ1SDnJT&0qSb) ',T+f-hȄE 3c^ t׾Dp3!T͸vwgFf)٫f6, yEƽuN(Z& HST\>nm:n~5T>p S`ឞΙ5`x!:`#u@ԖHplZtL5 F)-~>~'xC)ޟTwJ=XB__/67(~ jbu踙x+ s+F5{?I)cMX?j GbzjGCQE,5p5m떤s=<=" cl(px!Z 7&W~ 7v^,|_[?@^k>ai4~MϏnynZJ~y-?a A5ۛ/ݝE䴎`a휲%OUdPΣ28rW R( =uFV'5f׷ן^G$]1f譪o>fAױ<}HV3$W-NHZvHMcd֣ȧZSRq5~{(U+cMңSH:/qIO sшJ3\$Ivw `A:/,|3ϰxo_pyt6yDFsjcG3M:$ѳ~׺f=w$l`ovp_mʿ֗QD^eYɝ7YAH@Pm1vzDkbol; DK+(ƺ_9^z\ m:h1OkӘAqb{(F;6ZD09 i?1p;kMS+]ۉ}.u :R3)FMsTRƠKG8yNc$=VK5Ek=ժ~Fy&؜TzFW~dYc]SUo sG*V*鐕) Fhm ?ό\#E 5 ӔNvݭn#twjAkGu"S{\fP ZPyt]=a Z-{]>|zJKҾߕVZo>gҮYhtAZy* Flc2B 4zK!wST#v}{/CwLUw38-Ν=~q,6+nVuy|faK=䟼GQޅ3L KC7A nν]qImÏW{+V%dX E[a+e%-V̇n d _KF^G&z&c]o~=]Aې`O7ex C{;KUwx8(Փ 8o-PTtG:!u|?tbڇP'u6ajζBȤz`=9H( Z4([Q9 %Y@aqJM[:Z{3RN !ܑA4bx4ӛH6Ց^N٨{P@'w\q ^N뻺fNu1%{ }~1"t'I%! -4j:$"c~RD1txe-ޙ}ڰy ^X&9KI\KNLBPLTs(uc؋Y )#J~(@Y0 UXf dS9*YUYZ"!aU[^u4 is郐In_̆Cat{twȇPρia8+CFp^>ehv5BXr}rLPLpLd޺rdߗ#K;BCn86qx;ogt Y7Ȱj D?3iL4Vڹu]=F0h±ir82!3G4tRVw=%^hI ^F0Ĉpc-lqL"OcBZq)~܆@~6`[c!?RAHkYd&r!uw͡-vp !Zh&34M([{^h%:AD_rߝ OD7L "D-3= Sc.1a$DWb!b=w>+ZZ0(@S,E4haӾLTٽ$XANt -@?!a1Aܰx;Up ?x& ɦ^263P I`Q:^ww?w'@J&k|qmfV_O|׀( S; G_Ȑi wTCOQhs/c |. 
\p=IrX,y;{xXBqR+j.BdNG8l 9GOAW}~ua¶?(s xP 5n1=p;v`2(clNp2g0+#@Zρ 2?j (Q^ϕ' cJG ӹZگK:GL$(tJbٯ[硰$T϶s~- tVr8F?$\8?{ Tr[Of LxZDCީ| = gLkz|7d!00^~M֏dֿR?J l?a~pm;f[]:cG/Ncl*G *Wlz_ Qi:{Apj8jvq9x>C> H1ZMg{{{M 4\Fl\Zۇy}X.<9Gn FqEI 1cx[Aᱮ#P*mDV;}}uXESR#+eK fXkǍvoM02-Mu_p=z9.)gdz(śʒ/UjRv\y*>9,YWߍӉ2\-Z Pb_:M:B+Q^;H9lKaLSk4/?Jr?.ZtN<'-朜\Z{g5ݚT@(r4T$KӁҬ,DʜTZ\Yi6ݺWǭæP߿ 8wnowìp;KMɤ~z{|mzg=jyaSY]n ^nDOI(jq`9@\ D !O+4JQ-8BIJ >4p\/!70ct tiM7@jU!>~SṬo= %W EsJ-{@3"D>B$pXF]nڕH$*K(He h>_H/wr30.v6LuQ̰f63uu5o;繶-*3L,'i6V&]7T+C^.f>BrpƨbV.uqnSprrZ3Rr >O>}Ӆ\~h?M@N=[t([41\z,$qKJ\n ᝻Q^4ga*'i#0C .d.z8xצp {T2 맃Y/Q:ѺǻuruǠ2IL^ps!Nf+&c |54>IIb-cia=mjANwq*G:V]4S<ȃ`hqݲTjPJ,?I|ZM@Mz8Q=OrI;']q?qشBEb2{O\";uΞFAHBxh)c>R<6b&LSypN oov'/u#t#Ѯs4Yv+H|F{c[pz@ H&R=24z+{6^7{iD zN;&nrf_v79 Y|W3#-|1,VP֐^\%],+LF!ݚ;Uihq֍wAA}΁D~fB) A%nع˹ZǺV}bc~ 7ؗL^ո܏\/9loo;i.?]™}X Oѿ]voo}_fc$[ke[0;#f XO_lOޖ D qXY FpCh`7G{gQgSO84jeyƷ^>F޽ޞL *^HԫלTqV K"YrTH2<ρޕ6#"e1;#W(ԃl$eȲWGuW/J۩L塔+F-`0BW #tՕOxb`_f,NKymd"cT^K/ZK h).SKQ=ޡZkk)D6. υqB0 ԙ/fyڄ?f9[ԣ@#a!`|$~[ 1 Vxn돔-#P5/NhOr(,|P!Hs=LCi2d! %(:81I"R ~L@yAx|l盜,x$-ȌI$ryWl[)u: (#Ib7t5$%msokHhko wLY-_}HZՃ H%h .?"@XN1p$xAGhV^II8, $T!DPZ"lHB:/+.H}'p<KEgBN;B)Q"͑AN(ԉ2FiDRBSڀA4`JXRȖDʖ8JZmT漭xޖX\Z /&)N:BXZC}j^ÈAU7zWXi b9}|Im;EȡvOM| `ÇnÀ1ocy3sSöaFƧ[q?4.(A][4++ 5*<K4&.ƣj(obe0';dHț>*);yߍGC~+۝`{ &Mߏ2A%)8MTSJR- [jU*_$\T,P"yjvDw`q,.uQܢۅݓ9cHBl/!y-0;E=b[QWKl 1mj @]K:<:iQJ<2^1cm^׶6H 6ݎV_k w)OQ>?:xϯn'cᗶV] <*'+Y:hRjڤDRFSñ{JZ=whL8\Q,e)n4ݠ2w՘Xbĕ^+#{:I>O#ܕ9o-/êXaT$-YO6J_?J+yh]:*`ZJŻ;w\}+Ի?:-O;oN.Pá K\3R .+u~x;'E\˘[C}vrƽPlZG@UPD.ܲNE^0'Ju^MC7\^MƏGP蕹 Fe>Om%i%7wCV/aRB}=)@҅|sޔY2kP!T)Z'H 1"RlWڼu/@1ၚy׊+cjOkH>](` dJ+3[-ދ ?tJd!띎*D OaCb;Q+ٔm}]t`!/<'AO]3TL U Jk #Pa'f-u/EMvezn4vRHPZ9xJ1 0D&yH "i}{uYyu0/<Ŏ炃(&|ܢc(4u+dA`կorf74y&័ NcN{uҥ &ϛ0?o)|3U*1""1: RQ"UHj ;5#!~~`x|3Xw*M'3k47FZ ~Zuۼ:2{`y /:Nl嗟B_߻tBz=C ӯ֫DO2NיL@THV_*oX[bWE{'WD RD(w$^ SpVZYǔP24x k JzwR6eU0 -O23_nsz.?y֮$w{KB\Cc2~+q@Wj n?8 }BCE(D+oetľdFƿKpۿ Ĉ?)D LbM^xbD xr05_f~ÊxyԈON$ZŻoP_o"*RN@/Tw/8HBj^wN-=FxjYk%j)BHtzz19!SRf [ J 42Z[$搀&Vk$DMuḋ2HՉE#ϭ˞[,r ]SKQ=ަZ2{-h- R,}N-=FxjQ,;ki)*`ŠKZzZݓXtBK|9RqkEk)qZJa -2NKՄ0Li) RvBKҌjޯ"sqx.MKS=ޡZ~ſl-4NKqv! RL4Z7k%j)8-%Y鄖@5A@ek)N\MSTk&3LRc4wF*cbMfTc[jN%ViJIjƉ0O.vJ'*MSR@ t1(?iT}TjbX* x)j8aH0Xpx$"PFETJ:aǎQ=ަZ۱˶cPnǨoS-%}KR,~:NxjY]ZzZU,V)tBKҌj ^K/QK)R9;iiFĨU^FRo}y9^SSݭf'v)`aIbY1@w;K0q:Rjي1ULxqd9qϨ̯TS:LbCF&.CJHIDžq|Ww۷P# ,jRO?SteJ *'^U<* Q@xkpW9"EK%RJ!lz Y*OoNAѺVs N Fկ 0aHA5Hw6 ЈPŤ<&HmXz _Tj.PȈڊVr0ST4КJCa8MF"ꉦT (J1{xJ}|wo[TELZ͔lL^jYYV(.I;{1됵 H`D3)P2- R`$zrmwgxDh@Luh`jv,GZ(LANgAAKƺ{w 8_XH!1 >*2 0a*tf3;ݙCR,x31yћ_Xu:AJ/ M$AY9d0e }}+ H׎^KEe=s+FA?Z#7jB@D"`.zC.%cD0@`&J#j-I@I M#J)<9  F"b$#FTqxrCUjAL4$H@ D1e G$A8C1 :ONd#)q#12"6>ئGtUaYI]wLgՖY1,DDR-M !IB$TZMP XDTB f[>l3똋 q+8}2#P*&*K!Xb,3Œ8p)N $ H #4)g:"E);jQ_ JDzˆtC(3J?r)WZ;[E\P)QF`EHqB.," FuQ F~0R6BH/Q(G#'FmٻlD@%|ݮ "DhOW/WTdкԨ@,:,%lT]k/JPa =V2ʇG#|N =;*( NDV9Vs: ȡb9ӋUi+N_9gtFr\ՑY!#DiCi[$Ԡ#a~Kh՗aR" %*)B._u_+'R2'S1§6:7:}7uzMap?~V,p*%Ng=Nڹ^<vj?vOc"Dk$g(%W'|Y*"gV8Y=nJ'ETNVꠘ9g1_!?Zzc2hI1F;ot('c*A ͣ1*bS|'oS-HHa ɨūRb D'k{~u_~nWS;/w&#=TAuz-샪XSz~_>rO3=te76lֶ$ 6?+wyػe@nLj@vru0z(e',@NxICrS .v,kmnƲE/[3~[LMmݕNS@%El/ J2E(I%n .ι:nu1pQmTnGN1BYS8Z7h-"[7f}nu1pQmTnm61oͺտTѺА\EkGxO5luۿba%p]dVߠo.=w+1@V ,J#s;GmncR>HamﶪJ='>K|p0A^C8qoR 2D_׈i)bVK -=&m^j0hRhK12!aZ@C0*A@F(HI N}uzD%rDX)D%)PEДH)nT ưF14NHYsUΊO*&S'=^;Z?r(bGH{sY8Ş/BgY|gu۩ERG ZFtL[\WU\ k>x&9|[8txs^C.Kqޙ?SYh8T:>5X+}Fz+)h{2gP6e- G̗cYb6{Ws⍄(a<~xHJFIm*01ķy9Μ/jқY\ν pf/1}ƌS?i-n9jQ (@YVH(V$ZwSqc!';0o>3fvLI+Ϥl:#wS7jα yT ]*Y$WXvJ>PLt̬MmQc~O]t,ëF]m"גJq6? 
a-n2 d3\K\Fxj5vpWxcʐ*l4{WRuD'@OjKkܼIg( 4뉜Mp<5i|aTDܴ_b||U eb*a $(`cz,cY &I9 a{Lќܣw7M'D=:_[ 91Yƛ& 9> x<gz'N4<6Y2,kM%ܶl[`ԇQ o}̆)3DD6C?$BSRPqoO BQOOB_(2p- ƹ!.0&lr]!H+4EMjMfˎAs xlZn>3Cg3t>y˺qΈ3gG!(M B(@)[xšx<&N 5. (BHK|:$K X+Qǎ׎xEYfA7W-~S|a"L$ѱJC,r9s-=שᯅO?~wGp-}|6妆w }qn 0y7$rez2OcF1Rƞ`m8ZdXPL,a -?SSWQ}L)fO " pU)HxE:A3R$>&R X_90ENO8"`!H4hې#6MDbP'$B Cx)B7.+ 2/SJUn$ |Sg@qĞB(sZ@.UDr&"ݽ1Bd׋t(/a(ڱZc QU(a¨%"*&*0TGCqcPQB%W!swЅ@'NP?j`AƥxMSeH#j&P )IIJ `J$"щ#1JL1S׃F.9Y' mڕ|߶LJXM_ބ~s\zg91| 1^5ۑDEq؈,b҆U b*5Yk BB(IlTF1-/ ^j$1#l靠G# ,$2C&q%"2&(LTJ9hL" c($z`h$F(H]'ZZX}?K&!e#)'rWOȍ)ǀ "to\B}Q" DDV?ޢJ !0n{0#W-ٿ+J'J+@$%w9Ʒu533(Ɓ!`%L DST|r·px8'ZJU_-9bݿE%mnk-.Cue|bn a_s@}`C ؗw90 E/s#qADڎPh㹂= JƤ;6n҆G˛zzduKNwisqMl! o-֤ҭX,1FxeTB!婤i}uУYc6L#k}Ed}WfUZHtʣq^Dk(Fr[9wt˵>A8ĭߍ)4 ÷prq2Ukd2+KV{|e:ey)`r25XQ>Hv7}r#9@{30rQ<ˍ̚wfb~ٚm8+n@lr3zR86}esm:[Q%rܴvt7B>(7I1!i=f 0B68MB$*]C1+C}{,>s !u?8gcv"T0+ ȥ03q_q2Ʃ2zhjۼ/ب𥺑[T clՓ̸d7lJ>Wϙnn;f :2V$FGAj9lm3Fr=Y YB֫zuWw`vlրvfDh5M`xMnOpY:,†+(np+pE \!ZKu2 UaZ:o=iCnqX:+%/3 3j2ca#,Z߼sSsnPp恨;v:0sLZ7O!)=,UZfV=mF0 oR-y _-^Y)BMsMZuU.SMF%zOiulLݹYS3 ]3ߤ=o(ߤ R@ĺ79nLf/ o9WY䵇|*kȊ3|j^vwjDD;ݐ ,P)jHmm&U?P~f6XgKm@Wr5x6MXd1{l7]DM/7V?r@ G ňr*3g 0Tp߅tOpQ]BK:⹄H<5>c~#/~^c>4+"fOtshS}卤OK5 vz-D9Siqe;ڱ3 q|~s?_FQ[@eh,,Tg3{T󊖦/0oW 2ER-D-e|묖 :ƍ+ ؗcwW sGrV*RT2R]EMD!Pnk#_|$I8 6cVM$Rѥ&6OkqaQٻFn$W;8 ־LmTXאR۞ TQRNeGY/H _RMaڳDHj 9QeN-9wH3 FHE%2sQl_C1g U(^r&)12BDpV~z&|V˫B`<8žl5h^= XjKV=tmn Ptӽ hfap[hp_Kl = WW?Y?V7%`x NZ%>@5)!RcKP:Nb~4G&8ڭpy/!QvܜRҎӒls7EמӇE5>SڄծA8OڄMz 1Y.䅿]^G ؁3(G4GthFkjBx{CxBkGTGo?1Y+Տl%{??]Erðo><7 |i&F{#k.3E8j&1t!)b>|*,48&rPݕsSUUCa녡]gWE_(=S>9Ֆ̜nGA[vy~G+S6/ R@T&/yF3'COe ˂Ѻ]&yWydΩGT\|uƨnâzD5x>(DT=f%VQ@08C IzQE#۵:דD !Nc>TGۑN1l?cJ]A8-AH)tv ?At2tS( `0r;ۂuWr 2K|t}*(A#eOݷW/$9n8i 6w M_K)Nۆnfu@xsl:j: =ǦmU8 :Ƹ%YdFBAV\%^;1j'| fFf4x:CՅE2M hLf3J8QYEs#<7^_ZLP 4jhJuL&\qw?͛=_`ohꪶx\~oo.J_@|HՀҖ\sBu}73w\ݯQpuuyyw:\k&oϮE1N E|32]w:gz^@ S^_CoVj%MɪF?9úD[ 7W'{"TԯȽ4fnEꑅF*a=-74X8dgGG˰'\3}j(*>1Q @ZcD 14Reݝ叫p~|GuL1zuߺY]ԇcWbL=QD_`ORy45:æm6;]q~>&E {;Q=*Q`R lv*lt`Fݳ?T}cVA]4Dd+a( ݩTiN:6Xu:Փ\5שrTSoj@NI%\)ڽG;mjxtMD.+TG/pZ؇eh#r@y/-K "iq֜nҵrcc!C5FLk0)|*QXͽRET/+Qf؉7;UAR[/M^yiLoҼ%7as\'oHG2*~qĜʅ#RPz,Hv#FQ=]׻(T%&.}~yd?X萍"Q~x%=M۲(+L*%yGS/S)i7L) RHڂL phqVSr8BQ1OLv6h@)rt D4#[hI[4m:Ëk%8Ĺfi|};cD{piroVk s"+(/.)2 BWL,'3WX#L*8ϴ(9" )i.,#ageo3ɄrNu(H2^SDS,cN ݭBQn~~BnNVChQn5n&HD0VQ4()Px.7uBq**D"yUԊOu[i\R$|n!@Sl':ߴ{4ט {b`dRˤӳ#+1y;udq\{ "hNZ|2F2]q؍ Ec,UrcD/Zj|*LѾc[߽ԢGQ1zTd`kkpnP}wdwJ4D5M݁@偧R?Bx!'Epj? cNB " MRMS 7XS  ~\cw7`-mvuN^7ŝ)GX3#{$UGe kdg$j[fZ.~/gy2opٶ@jd#j ȖG;V :JhԤk0)_(nкSEVzNViDѭ<:8un{[yt&q2``̜_0h4ch)bWKż Aݣ vɻ( PF!ͮunHp1mt>OcIal8&0/ =8o 66)قFv̽;5}()] l>f}OI[m6;tJ6!auGdS4|QOQ!@Ժv0ۦ$Lw̤s"A Lmc($DJ/ #|pen?OO!NxTPKW!i~BmAR3RX^!K|qk9!RJ_ʯHTh>Tkb[jMV8+2MjCZ!/^HJ_rg_%JI}-$ dJY)qjV pBZoQ9Zf>Ѯ'r*ggIk5'uĐB5Fpt&u^]*Y_|ݥމӷ‰PP Vm֐'9eR6]cO̶~o(!+B=_DNbJH@OS6gGŗ٬*s 6}^k.wB?mtt9q́(gsF,]}iˢoea9cD8::ro;h1cܿOipCtQAd4͛jqt#}m.ЯH'(5+Hs8 =WrqvA!k$`/ Vr%p€Re,4]@!T9$ ^w%LrjdnB/Wc8{6˕oK}qpլ?',zne5/oV˷~{y޷i5LP IxŠP@-[fß+L祤A)#VQ'/RIQdѕao`Z]*H9&ycL3j2qQ-rDw*?l:JهFi Rt޴cǫ[ WU +Oqu:ñ@-U% Y`!u&ޡ:$632}bI1e >:VkbO˾&D@ :ciT ([/5NTTX0F8QXm>UhsL6I6Y^?Ϗ{iO L!xCwehsI5r($~n:P =}JP5 q#`OSAs8LTυSA F><U qΘCBhN@̀;[@Bʵ%p`瀼EkSlii;k YG qs ڑ@J*9GD&-*M`&= tE~5 kd$=@^9'3 +]U園ݸ?=m2 DJQD$rɍY 2G )!4amm#GE@v4s8`2|فAv:ǒ3oQ嶤VVVY|,Y29~7yrgZ"KQ uP. ]:^tAFɔł(4!z{m+їO^,7gvWpq{.M_uE%^,N`m1ѝ^f JOZHs_}VF]Ơ!7j@Qe:=g:wN74zY-> 7{|g3gh&rlNsr[;~D>&@h zﴐ)9Ƶ+e}mI:^prB۲RM0zrr3(1'an\)} FEGi!v*ZA'RdNDicTC.D9Svy@HFH'G>Hrp^ڐNWdu8W}24`T!7:Rh})'Ց, F2 x:0ޮ +q+tp\LhWxx'yfJDwMawQY %xSo>UUn;ˡ91VmQf8lCVf0ļ=)tWa1ƹj~oohcȐS]AVmz;Aa:idT61d̹i [tkJr%<\ɣ[E _u1C^G=(_6; ]pШtsk* :u  QL[ ZZZc8)7]1gBK]FIrI@PT[CDd"ZB5KOVrđ XO;x. 
3_eaM`<3nq,rNQk-W:́,g&<j$1s>ΠL,{.3j+@Q82!k>ц1!zYUwVX^ihKOR.eG@p*)ϩJ5`JeQ~TF9eԣT'_00z kPϙM}*sir|VL-!0%y:ńA2&diUᄫ!`Fzw_%Hk\q<la݈FK@R]KNP"Rmԩvqc͠S UH?vyC4iLj7!r-FvuVu]["[#cZ>h߯)IM Q.$EiP;@ EiP36RTYef4U麺f52JIB-(MRFZtZӓMN8l !E-ΖzoQlI_`$vBˠ ?t]xr>r]I4^:\޸esJEXyt>^Ǽ!Pu9-p:ÝRpBj)Z\Z[7 Q69D(SP Z/@ Cs%Nsk(@n? Tr'E/,oIp#KXHp)m`)Be"+Ɯ4pBZ_h/4%%敬Uِ-Cg X#t|` Oc"ZS. \sIJ2&2oB{"ms"el"`J`s 1$Ϝ _"<+ \AP 40 FbAS;cL帢G?=AeRk@N@^A(w iI*YGB`xآu'vMAt K^¦E=[n!F[5jh%]IqŇ!ҚAmԹp:T򰻾5}yKk#c kNz!Q@ejiJ3,mPOuS#^p $jҨka"Ld7) ZGghEi[|B| 5Q[f߶$z[u.R^J w .|_g KR:.ovph׽[ JZ nijbrd!/94R4zNHɶZ_#'!~vV1 KK1Ɩs4\feRJ蕎gc^_xcdRp(U["OTDI<ٟM 1^ % Z;,q /CUt]ׁ?K-衷 C]ڝ`exfsArpT9azjxȴLP 4Ɂ\U,f3m.40YXXsC OJ#ŠXr1YCY.T.2@sJ`iN4x=D>"8p:ohsZȌr3N0%98ZI*hoHJJRJEBbQ]AXǭ]Zr{ObIo9KoX*hzYon>{l3 TSdgOoFd7g).߹KFLw,qvw7d[`Z(#%}TP{|6fGt㖗028hEN IWe9۶ 2Q1TJ  J-Ze 8(ơPY奔9P_*Mq˚hJRIR@i~}JbC;8 dğW_hyAMW?8_2d??z۾u ͗q^BL㗤Fg [,1ES0V8p9zj.cfG,#B~ 0]B6"Ƽy\ʜ쮙1'*wxsg qy`B q"7@w tG39tj!!:RcDsٹ g%jg3tί3o"{$ . 2PdHa6<4z0d'JpXBй<nJm~iO~4(}GZo{J|9^dq93α\.Kfb1J|![֪ T|~*gq7ܰ3X[;= ,V7&1y")#aa7+cX]yVE^0lTu#LY*bЈUL%I!}Xy8j\@Ou]dq?xAʻ8eCJRIBhDQ5d(VXh3[磞 p%b{6~;L m􌧌KrT]AhӍ2%PaڏRqǨE,m=M|g|sߣw2{K{Ncq;9h1bo /0 Ӝg=iɱx4$H "Z†?V/Rb{$$/L/Ma8˴ߙɌkm6eTksl[)%Bh?v]L0'Mۯ.M.frI6b{?]r)&u iGe,vh׫J8̭:eN%Qp(Bȣa/_MA1R;e0-%k)ZzLKf--y-=.^UAy #cR$WkҰk)`ZzL^K[KVO2[pQ'4lZܥ!^R' [zLޖ^@-eC@ ĨzU?,c7D'˗uy-m .щMV0GbAEj{m;mZΜ-|A7 7@> VA,BbRLj)g<_F|H*xP;E2 rڟ* hPD@hŀN4ZH892ָ#\a.ڞ=5WU`wgU[`,4E6qp0kmDQGӚĵ@L&a 0 Hzkv1W/ƨ_pN! 4,Yilu2}E[LȡSxĀ"ȍOH *N[fH %D[K>~Bc9R[܋  kBCSW7"xG@?[7mn/A1|; ̹Q8g ߍ}|84~].,koS3- UQo? /6?|/G/σ-?=Y90M3Wdpӭ3_cQQ hѳ'4n?:}-{V07 q53%&;֡i2x$7~2F|V'՗pPV]fhIl .FkRCA~Bkˋl;aoBr*u+[!BzP|}[.-N`_&^=JSNN!Ā0fr[cr:̦O鿽έ$Yn[ɹmP% JI*"$C@:k4uݎG[mnWmՐv>Xflmvk-쐏O7ﻛUhƏr~w|y2jxz\|Nqn>y|LGB*vj$Kw-.!CA<=wpl66G>A<Μ7{Ww_=ُbSz6&no+_epT@sM:${\ [^HXD@j ktZ|*Pe#2e:3@6nby/:@GQmդ!pSu~ޱnf ZN6Xy_SZn]Z:4t{MNnU1Rmn\E;ڸu|5AuAC*ZЩ|u棠acGACp((DG%ݹ䗬sL WԹ:(6ZzLB"kUk)L^xH9g,"Ec")`)/J Kl(TAHMEc- :1I}_Cuc'<'I'E RbFy}˪ʕW:ԯ:sb6]8naƮ'nvDRCJy8$ 3f%of/25[`\L {|ѳm%dX.i0kV텫 UϾ-y/&~ ̜꯲Vaxq>.}QF!rt+K X`glf+b-9+`.-zyY(|@Ǘ녺KDNJJSLЭĉDU9i{HFwqIE %)tb- KRL|D g .H~2{ͳ)ʆ2^vsar@P4wiW|ѹ@;ju bA损ϱS$*MТB@Q_.#[ G$Yh2d ixMj$ \^]39X:N۪D-T#(ګgP#6<򝩓^7#Xrz Amo+hL- M _洿vːA|f?Nj{R$@gh3')fw{_4 I8!W".b.D8RJj !> vrZL&@@g-T(Vd) 0!b,J[¸ kE/k~M0:Wp -PcUBPH4ut*@D J|^qjh ԠC2rrd4r\;JXbb,4کY *† P}7eR!r]6Ϸ+ r^ףex!ga$0{)raqR :Dҩ>S*LULe!`,v!'Eb p.'Lpnz6&a*2ՀXvr`U6#!]V ui >>>Ξ^ sV%s-|QKrr¶tMb) gZ.x ˱%ac5_Gyu2靠p=ɟ8}mo-QQt;2K du[]4 Aڰ, Dr_bzWJ.wCV]Q+;ߑ ހp s[%)$!4r yXH&aE1)Ґ -):4֔K"ȱ"Jǐ[ bıah'fC @7_&*zq}9y:,79^ /)Kk޵3L&UuKgڋ&J#Ud2 tJ~Q0X[{.FJ)dߺ z O.||2Y.:1A%m<`v V;uaKk/4k͛A4,+ )cIOs;,ݦ7"&E_Q+ohcҐbOrKcolbʘ3Ew3C{ϫdnDCS_05ªBkdJg˟SA)/Ex%Dǔq"'h?+{l\( D ɍ[3VOI20]qg #cY^3H+Xa4jϼ{bpz L9\wEMƻJ(.t6 όfhJ EScWq6ᄫQM&A(Ƕ8:ikXRS>h|An,df(`}}S_ucEc D j%->4'8 OWzE,` 7H'Ld3ڐ@'J4E:HbTsھDa-K,9TdXE kp%WG)A誚 ݻArYjRa_`i"լ3mVLBˤп!I_ЭJe@MU=]A;pKu^*qʫSSEu뷌圝ƪeCޭɄ,Z~]\x)WOE';?GolpBCUKy2ZE M!{}rwڱyvzﳱ6@J6ڞL'D`Jaԍ @X(;9p!z>S%q[] @sê mmK qvvU^mm_Ww&_}Zum:ಭ'X.w]@rM1$@T]YYm62;v{-M|/WIeP?~u{$]//zu/=N8"NRщåe֑8/4F.YEv, (_ݫFwZ[Wn8ޤORSs6\!IEnҥC'..@|Vf{O!.G$QoayLgPDqf@b RI"ə@̕,6:g4'8Ӕ"P8raBD L,kV~ sh4P(+)) -@!Q9ƀJ0d0fT09+3 Izۼx?X)"R9S7?i,").Rba1=<@ĥ6hZOJ-NIiouR6ObtT'%@#@aza p3{Gr"`b`8| 0V"ģ{wA+O@T /ɈPiR,RJ Z%Ӣ`MKӌ] Q4fH^ζdaJsKm+>H=VtC)t#|s]a”v}5Y^_=2]iqM}򓚡FCѓOsfS9Þȏo|ڬ0&s ڹ3t e1yN9/冂P; Pn_eBa]DCʮCI;R{>"5Rhq|c@aqxR#H)exxԺ; PHIk&1pNhQ)nͣK%&rHlweh0S*p0p7SmCX^p-a7/&, i1:J8(`7c$Oa! ~Xup048+RcP;~xjw~ ! 
/b_^ja^ORS C^jA5j!u&JR/N9/_fg>'~?X^-Mvj/UU,^?M[Ǎzómcm޸fYNtFi-q5mr O3Mu&igj7ֳ#MlV5Un;7nQ6U Mu&9nVfDĶF\k7ni5ޭ y&eSS]q{7U;>wtbnc"yxOօqmmSRR|nU = Пv7ǻ6rOnWζ{.%ՓHT `z//sKt,od>߈}) 'gV{.v&}J9(ȡ^㡬jq@T+=^:#}~ 1R n|b쪊l<?dWr9K5ԅ]U\@%FVMJ .z^HDD1TҾ *莫Kt\9JvYcQJGWJNP$q^J}u ٶx/5CN1U'4Cmto^E RKʾ}XZp>jr /KN, Ho՟YmfөՀM#ץz@q<~Uh ;[G} Jkq0aЂ޾]P]X#ovn&Wrcq.آ'/yar$e+)KdqQ"/r)s $+96c3K3ns0v%&IhdD"!\jD,GZ,5 hҢ Q4$ [p%h0{BSUE\R- 6آXbGAa$s F*EnHI-$\c1!\m#lV)oN''XM4Ʀ<}(wtbn㭽N[|wB޸x+c&kc#zX |L'6.J6wOsn]X7L3$HzFS}pP )"q>OU^5]וYeG+LY-HQ9_~S;8kf}t#㒽'x7of3.?p0$zafzu{ovDruZ=\_c"Rd)t:,lgOTFs+”s&e?o#@[`釪{OM[n8qxT3MVjቱC-T$LdP#/Ny+f_mLu,!X|*lY$L_ay/ʶZ]tJG*Ue@%TwX-[˨b* U4R»p-b'I*Q* P9FZ>BOW=fPNv%ص:ۤN?TS߲6!w3Kr_]orsvpg^ĸ'`~=j̻?m!Z,)uA]~NV~c텧K5tb:V0xp,_q 4P  &$ N,XvC4-~_-5KDTpr8%ELS4tUj21?`BzEh7lX:ӁAʞ=&vt>moGD$SdnDÄfi}ЇnefD^1!%^Fy&H!3V !x!T91f?<9s)' 8zy|`=Mt\ @GRU'T]V|Wv톔IQ?{Wɍc^ckEI倻p@Mda2ٞL'UvMuUWW$3UIHM*r!W`5XJ* W w"wN@Ƴݰוi\,2KF2uS~qoxI~Ĕ6)34&uҍ&5qBiAaczFSITRSIc2e ~, ƒxIR\A]2W돏w?>X2![;[+kQjYWQ)Jt-4U7njy(@j_~ nV]_?)Ipyz"3?LVdʱڐ00L=`!P\랢vl-Pc6@{sa 8+cb7Dm 妶u\Cu`.UcRg-PզYBuO@wJ`4ޔ1fE|BCқ}jV.@j0!F qjzC(PqB{жv\% kK2 |_d; teKfE=";rP*j$V R #P VU*wX&lߞ}< 'ǿG%C:R)zSC+9 prV(ɚl`-ɲG8N~,Gpn&{sdQjr1D*,d )5σ$K7^\!ObHfZAvԵ[4x:g2Wr"!jbaQWsVq _5ug*|҆ ? `;|@mDj~8)A,?gӲ\Y=p㻛exgTʐ,Sз8/n~>E/2\Ezв^t?M},ꧯ6]Ti/׻Dcb"'xBW*˿J~xqې7Oj-0\3Y$\kq9CSɺa}R̖S}I "w觩뻛)WT3/OZzZ*wq2EwQ@VGWWTE?o6f0eYAm`ժEY)R5G)2 _n`Vѩ21*6 ЎbskmjrOqs[+Rk1'5՗ųЖbJyQhK; 􄵔}Ҹ"b0-}M)g-=i-^'yYhST_nR{g-=E-5,R|BK ˴Ԭ)g-=m-Ew-E_R㟰z$ݸNz3T_nRZzZ*Jt ǂ QS޿NyŇw>,vq{cg$A1y/֟e弃=~yǥ]ԤͱPz%x@>^ޠM=x'˅8G͢,vE;{wlG9zEqѳ#+95ou72D4!_!c:EG4p>_9\ 0V-S6^6aY2'lk#kޟ@86F<,Z:|} *4A^=1vHx!G u@ߔ,1=ίB4J[QcIemj@ݬHB6YfILU ȝ&? !jZ!|]?<ߚIW)_2o5ſW7)~nݟU?57_viW? d "%jsO5s_"n\ᢒ4\Bk,9ZKZOK eUmeL cM%\ЪWRt,6x;][ dk] ZOd7V*itC"e RS66"b,cYAKFk19Y|x:&$_<4Uy—[?,}7T_؂SML<8釟_?nF)/og^I^/^h=E?[\|^ޭDbÔ6/|6 p{=ut1r^yML/.ޭoƩҺM%i=?3{N _gIS4UxNo^%2#2Cjp|X0ZTDF >$pdGpN[נׅ׃nsaW(ɂBcH#5\Xh}`-\&idSCX `JfrP3}M0X5}U]M_~S^VL\iE7zĔg4td)m`E{qUy;c/ʓ(ezGy S&&ӛ%th{o5Kf"CrMz`9sj܈Mo4N -ש Bפq1&}SM$3lkҗ%BJyX/JvTu?g%O/+ 7ݸΙh)ZSMhZzZ R,@7Jߒ-uҧܤ9wm9m-ՔqIK=,ԐLK;&ZzZBGwT{g[zZ_*ы4n fOS}I{n?rZ*6ˎg@W 1JWT;'ݸkw;SMDWFi\`oU륌7Y}ъRdϣ༁^w`'^ch8H_^1B-b~6b$tK^`Lc.Vqm3X00X/a _Ȩ MYzXr<-r)Jܶ+y+L,ڃLRe磗SܾHZ^y*юoDgȥ0#˳nn矣^q4NzO1dw`7JVNWXgQmK1Ɓ,hFGqupu[渋tT5T:8i*} mrNJ6 C^D{`V611XE,CX6i+ `ꚴ'|*=i,i؊Z+yBr(Nj>zT5ZstH*.M d]PemZu--ƕl*ZVYde.::WEx 9qoϝ7{-[X.-J1Al GZb[6i*T@*NO*KN)Ӕf x"wqu6i$zw`cGC6V %;3WIMlk}AN Tm$ިG I |)(VO:W=f?(Gmz.*%lYe^֘;`'Yes5iTPxMc= & CBW1x276M2=6֐G{$  ky4'JrwKgL\HevXJρ apkh%= zߜ Y VthyC S4x"Hj &=>m${ .ܟj6hMwmm:L;%^}s˾Vx(R,Ɏ'lKNݒ:q2Yd@\HǫQl=װ]QW<.?]Kj楰rϦh؋['=-oMs^v6.&gOr_Vxyg#SjgCwmKcV9y?Me^ڵ?A9FvU"W ˩b Nkta3dzTU޻0J:s{X!nV' u3T:=|pqK#9_a|pO1Ha8 l,u>.QCݼKԮ;SX"8 W7<}>)qv*}0{9q-a-H'7,[/GBG?? ޷?(Y7{/^CG{6T -7p9bwƻG$ǥ0zE򏗣f1K.52#3@g+3Xfd EL~p[_t8F]bZϦ'unCHo.d=W Un}iEtv(is2SpC8?tnoqбZ{n",[<պ1' μ<x>¼6oY]'ɿ 9+j 21@ °sR[7 |rYM_~ڢ߉~e@ԡQ"%.B䪡!.cmHN\@TEJeJ$n _{`-q0ݖ~쉆,ii4s6zm/F,^ z aSXlSlFNK(VpCS1T5&e"DIn!bG8I1B=a*`T0?X/$ɦv)ZqCg&z6U6BpQ[zoϊ~o_O=q>aٲ#sPYxD:IlI6mm3.T*~ R`d>޶OJNV=C)Ȯj0EEkK*hӽmtDk&Ry3BΈ"=$X0\ꨤ*t6>Yb> 1 H!3}vO ߪ/z5g rM-@MBDpUa4y8^ڪOw_RPA(YndbG!v/,ѧ,OFT CT|mzC I` Cq,&]?X!t\soY$0fb IYtxc{dN{yC mC:7% Oe:py2G lG'?Ǿ+&CG[ܻmt#*tJyIp#u $ʞ]]{F8*F' t|CR;fZ+lF S9ǖ/X|`f&d&$` DWkzd A_z5t3UZ ׾}qD[ZRjfdp c[BASv8sHgϊnR1%!}  3bi`4^4ݏcRu }@epoYsq~$HP3vxn\< Y㧘c=e_l2 #<-oORK[# b(mb-ɲF94^^ǻhJe!3 awhJ;t"hi~]5(M4Ì evQ_\b{j9?UY;4eP6m(|JتatgW\mxQA}R˻tLLDR h'f}-̆,Q9)?l;kC$ M}9=|կ]{ՎXZť wj|1(oAK`ߓ% AG'Ѭk(0ʻ`IqYHwmmKzٓEŀ$9Fes0I"))^*(F-Ygz櫪KwWl8f8DDy4D&Y`lwV*ѥ>ê JA8'˕#sϘCYS㭻v s38SbM  @},MX Ah-8.X-Xy X1&#ErK<>*avqɂ(6]T1Jɩ\.*JXBIDiPB7?t -g! 
0uo6*$%~۶iO}@Dۥ.gs,+8~v0!U<OXpJ\t1%&h{[Y*ƞЛ!6ynC?֤aY9h[3HJ&CqgTWT>l3 ;~`q7) XuCq-c\|OCI\N 5$=32h˘e3iJAt,+U?\gԸbV2j' x(XuSd7y;\A#۲ʭ/:{Y&_~x RPv @uQ@0p^9^4B3pK"eP7<˭,6e3`4NF2ؐ0qyz"+@#U1L,]DsBVdmw^r4D<-f- 0ópVI)SQx!{=EΩ5qOvfkzH~*Ir.KEI) Xh~m gѤasJi&3~>(I|vy02,F?0&><ٝ诫U&'A-ዓsYRR`D3n8HO\R|hۅ_RޫJ!I>T҅=E$Xd?8c`oxR[aʀ{luy&@ nUiu\dkL]*;W4 !&^ȋ*MVQ`=n}.o̪bub wq_"k- އxQ"@e<<)_"H%(c,=/+=sYY6+aHr7n|6:T)| UKr~_qǝOِT2J("{4Dcy~?qgMçVN&#F JTe,K 3aOljŘ"~ "/}t&4^B\bXI2C"“F|LDN`bEIu:~:>>=kQc+jiXo! EO&jr^Eٲ<QHFtz߂7oVxr d R26]A2i]"?ܙp l\uH“kQhE ޼{=_"W%[.bf p5:χp%g\-F>)jW`J; ,x6g;8sØ磺F0p>~Ws|^DŽ v"907de;F[~ski+<%\ëΖc2_~^OO{p/u^A@, os$jEy{S(f։yDxļdǠ7~t2%"+GkbuxybUU;!:Ꚅ޿MFgˌtOM|+O.DA=6}gp 1*!*j}􈌯^c|&Oq 9'88 gvwٶjƶŧ~֌swsQa@gwopZǴ;7p;eȚfalnc+}7HPR5#y~oŻY^<]u,2{Ovx#wk םP/vtt.)'* 5sJRDZƀ(25蒱SK iuޭy[p?V;r2;3'Xm@ :"ޘ"}0N.+#(LA< %bJ׊-7 $Hx[g*k)b8|$Kir җCUecsJ:qI RgK2SmvfC,3rmc1Em*GFqn77.@5}Vč&xkoa>QƼ Ƣ9 @XIŰIYUVsuQ/_)YmA"B>ϓŕnS G~GhF8p+)?&B'" m6+ iE+Z;ZS;ǂ`9%o hhrG[ސo`vMEjYިz#^[,eX\ٯDC>S\Ү=9gW5A/o`ܹpŔ۹pnrܖiO3~gIb;#3Xx$w0cAEϏkB-+PEvQğ ]5}]Cdrw/ߏ&{я(?_~+= ỉ[_QL' yQoǀ>xbk1nC<(λb55UZoA G?Qy}l[_.ϜRٙ>BJXs0%qCv|0^<Jk_Xofͭv0mW|lO<ڍǯ}8fɼ8R)AJoKP A>{J^Š+p;^m%$# : 4q1HB(6 ͣ׫nMFl}G yUB+K^q,42fQ@!iÓgh qU_=2a3L,\? } ;l~C q[Ȝ/UMhĈ 7/iJp<{0rhJDkEkA;n, Y[GXg+h@Hz8 N0t{zcA͕W^~n[o>̨ex9Ͻr1[.^a9O/{ُÿ^"zXmM@^af,k[S2-$LǗOdA)"5yimoVj?$ǚuJ4pM-H,On"[Rs8ї`^44Bnk@+{uyA9V/hƀAX*34Je.˥drDр֦[Yp{|qhT7V&$qc5TyQ/A藑@'1$ANA$zk-ϟqR؇G`*[(άdl#(L-) A0x`CsL;~p:@? kґ畴ˬ=Q0DL v j=Xn[YRu_z ~lpDQ<3E.S%^IΘU(D,QYd;>tnpz?*'8Dl,0^Tz(JI\Ae2T fXf-RJp-OY]YS#W+zw\ <6=a=/L( 2BRkqw{Pp(N~'SpnB-r+d ^KSxzq+&t&Gu)Km:  l(BoC27;!Z_c"˭MaC5PfY47F8(G+wYC嚮+J!f3l bvcXL1^I/2V)X/r04H;rQާA1Ti~O> ՟NPgZyeq3!Si2 qQ{7 +~<8JAHʁ<0DrF< Dd(165$smSV|' {*n F+uW@P$M}YMDV` kb API=a􁡖x"N#Ӻg+'ʯj܋`I}}*iòMJ*n[Rf'K3xLMUhBB@4'^i)x)MymZGcJB^GR%j-$Dm ZMdxMdsmw:D2f\ n#PĔHR=,o%ܩZ>,C XaRE-r3&LLd8h$D{LFH 7\zVS׳q3z2̊I="ɯq`M"4U%[#1s$KeO-À`¶\J(LpR`c 8|~cgb<4B(DN@h9?l Bz -~@,f&Ǣgo2(ZPpb;cϨq| k!Zk pL$~w>@0OXP)rIp&vG} ݔH bHsu"J#:HrK,Dj;,++K}R@VA%\)4(i!s GrؓFBȉ1B>r\0gw,bifywlD)@t R.<eiAGց^8ZXp".qc=d{$X^3nt9|sq/̓g %Kfz>+{z[056oZ7&,u@oCCF ̇$|09 ZLE"g1\#Drj|#jkۑf17z[ڽ2[V &5{\\A)yܭO-LR[2@[%<'WB|79@DA`}hݝROt$ Q~ſ?dHcp4ɅKm9f効~_ EKvg .طCnKY#BAVRK_4ok2X' dֳ  3h@=L;}m'1ClmJ+' ݱpޟ&k0JYU ob* ^}#[6Մ2q|t(w6l͎LuSS0ft]Jzk=ҷH/-!b;rVmlӐ_älf(_٢Prduvqpt~Fu4$LpA&ho폎LO'#QMY\ 􈑔ƉBR౛+:OHw|rT(E7l̩'dKѷ2M6΅)DXJ9b 6<5Kppj- &fvyë 1<R7.?.5/7@}  /o1BiM$? 
pBQ2[݈'Tߣ ֤vВYᅱ+4aՎXg-{ I/ާRc]OTt 7?v>A~^^^^U47p(qg{j YjD^0DcD џdTt>8+=g)!ffsP> @kV<3]6}+jJ3b7tVzT:9p\UVm$[##NĠQa̦޾B)im {vum柍0EbxoӽFU% }ӞO!N W:)[\u2O}f>e+ \ne2 upzY Zz료*M6JD:/ȩDA $!wj˜F`U}bۘͺw+4@-FÐ:vc0pDiVwT`Qe U-3ut5QZ^iǽKIl8E2D.hequeX` \~NhH$(=yz.֏"n_ҿJ8'T#W# >JG>΋R\q%_gW'ђ)(10u%Vd~w5cc0lpJp1M)VGxo*œoz ﺛR .U|%$5#c"c=fBM>2ގA=@*DcK|Ӻjoڠ֤ڂ?}ԎњG+fxeύ%+n(8}l^Nv?GY0m^HDVP$Qe LCBr(qJ |9?7[BPr=-1lM p/J>CCf@5ǯZ N9qUs ??^ix2Ici%׿mȟ>B[6ϔ?0SZ6$&(n /LNG_ZO@|&3Wvg,rֻnpGwyX~Yyf0+pUgSoekj4ݵaj6\/>9A]9T2gZcEnuO<3E-j؝jcN-9dt-twa1(`N:+aII_S0u>2g18tTҡ\Ag+OQloB-)FhEz>ܥ.)vrb2M7?)'䩝YuKEL]fu[S NglݞH&Mɴ[󼏚ڭ y"H8zi1 w}!N`qjCYH 'b!DiH 8uPXjTۄ=g=n;P,..!ޖQծ`}{xt^.uLku3KuʴqJSzJ5}&aZXXhEɁpI =bʆ`*fqcl擐|vhl6>>:?|:id37Ζ(܄oIi2~G1# ԳZicXwm΋&eҹ`MZcME4HUΐW8DqhP\xy'ko)LYiE3`Mft]dQo j,ru]NDM>bsLxș.ׂG c+D{0i0R+l,Jﬡ0– m@!#rSf |:vԋtv\@Bp5YuMB/\ |0g1gہ,+mζ~Tu׶gPHuf@{PhKCUnxq6Qϵ)D1k%Rp wrqfx+"7]~rk jjq5XR@g_ACHڰk3LԮDKj,KZx %$6ޗ2L~M/1Y==7Rv)z|7M"i5Csֱ-؟+47n ç] X Cm 69"s;1>_Rm˲]Ւڣ ͖UUd=vl_@#o  א7'=%YBT&[*_}G>l~D!'~É== JS,ȭH) F/D:Hd*r U]Zd6f)(/f6+ , 6@sk 7XP[GqL; N8nfNԺfRE ÄS^\dRd n[rD{r 3EQaKPI^GI!G&H<@TKڬS9(hQdy]~!BF~a(-%%_$~wc,`qtL$mFoh6Q4n!ct0jc84JSA;O;W2 7y3Eћj#TȣnH!ȁ̑VY\P0 T¾cy@}GHtTժ;;z԰!'hN~fby@N;B۔AkGnanD6B)x+&_ %'ïےOl̸J3e x.)$Vʹb*e̽=щ|FCdY1&I6(3`$+,r1_Ph xWTzu#nn݊g.QL.xDӟepSW MƉzFvF8Aă]6LGdC@g}+j;}TT;fp<Jg)ف]U7wufYݓz|v5ʨe=VEsh9&;B7 PZQ Q(ѺExDʈam=qhNtm:K6Lp37C %xBTBg -|[40󫇨*-iCܝf3zt)1Dw0jUf<_cesRl0ZC#TŸ e0tLh=|?Q @| ;ցcxhQtC5<3?2?Fwo`5^4=lLeϲmq[u;F*ǭpl6WӧـSVMO A9TOI5Jܝ7"Cvar/home/core/zuul-output/logs/kubelet.log0000644000000000000000007053014115136623430017701 0ustar rootrootJan 29 06:35:07 crc systemd[1]: Starting Kubernetes Kubelet... Jan 29 06:35:07 crc restorecon[4749]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by 
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
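A layout repeats in the configmap volume paths above: each kubernetes.io~configmap volume holds a timestamped payload directory (for example ..2025_02_23_05_21_22.3617465230), a ..data symlink pointing at it, and top-level file names (cnibincopy.sh, allowlist.conf) that resolve through ..data. This is the kubelet's atomic-writer convention: an update is staged into a fresh timestamped directory and the ..data symlink is swapped in one step, so a reader never sees a half-written payload. A sketch of resolving the real file behind a projected name; the volume path is copied from the entries above and the helper is illustrative:

import os

# Sketch: find the file a configmap-volume entry actually points at.
# Top-level names are symlinks through the "..data" symlink, which in
# turn points at the current "..<timestamp>" payload directory.
VOLUME = ("/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029"
          "/volumes/kubernetes.io~configmap/cni-binary-copy")

def resolve_projected(volume_dir: str, name: str) -> str:
    # realpath follows both the top-level symlink and ..data
    return os.path.realpath(os.path.join(volume_dir, name))

# On a live node this would print something like
# <volume>/..2025_02_23_05_21_22.3617465230/cnibincopy.sh
print(resolve_projected(VOLUME, "cnibincopy.sh"))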
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
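With hundreds of near-identical "not reset as customized by admin" entries, a per-pod tally is a quick first triage step when reading a dump like this. A sketch that assumes the exact entry format above; the log file name is illustrative:

import re
from collections import Counter

# Sketch: count "not reset as customized by admin" entries per pod UID.
# Pod UIDs appear both dashed (c03ee662-...) and as bare hashes
# (d1b160f5...), so the character class allows both forms.
POD_RE = re.compile(r"/var/lib/kubelet/pods/([0-9a-f-]+)/")

def skipped_per_pod(log_path: str) -> Counter:
    counts = Counter()
    with open(log_path, encoding="utf-8", errors="replace") as f:
        for line in f:
            if "not reset as customized by admin" in line:
                match = POD_RE.search(line)
                if match:
                    counts[match.group(1)] += 1
    return counts

for uid, n in skipped_per_pod("kubelet.log").most_common(5):
    print(f"{n:6d}  {uid}")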
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 06:35:07 crc restorecon[4749]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c9,c14 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c9,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 
crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 
06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 06:35:07 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:07 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 
29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 
06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 06:35:08 crc restorecon[4749]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 06:35:08 crc restorecon[4749]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 06:35:08 crc restorecon[4749]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Jan 29 06:35:08 crc kubenswrapper[4861]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 06:35:08 crc kubenswrapper[4861]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 29 06:35:08 crc kubenswrapper[4861]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 06:35:08 crc kubenswrapper[4861]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 06:35:08 crc kubenswrapper[4861]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 29 06:35:08 crc kubenswrapper[4861]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.738351 4861 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747359 4861 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747433 4861 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747449 4861 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747461 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747473 4861 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747486 4861 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747502 4861 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747517 4861 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747530 4861 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
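[editor's note] The deprecation warnings above all point at the same remedy: move the flag values into the kubelet configuration file named by --config. A minimal sketch of such a file follows; the field names are the KubeletConfiguration (kubelet.config.k8s.io/v1beta1) counterparts of the deprecated flags, while the file path and all concrete values are illustrative assumptions, not taken from this cluster:

    # hypothetical path: /etc/kubernetes/kubelet-config.yaml
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    # replaces --container-runtime-endpoint (illustrative CRI-O socket)
    containerRuntimeEndpoint: unix:///var/run/crio/crio.sock
    # replaces --volume-plugin-dir
    volumePluginDir: /etc/kubernetes/kubelet-plugins/volume/exec
    # replaces --register-with-taints
    registerWithTaints:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
    # replaces --system-reserved (illustrative reservations)
    systemReserved:
      cpu: 500m
      memory: 1Gi

Note that --pod-infra-container-image gets no config-file equivalent: per the messages above, the image garbage collector now takes the sandbox image from the CRI, so the value should also be set in the remote runtime.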
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747543 4861 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747553 4861 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747565 4861 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747575 4861 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747585 4861 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747595 4861 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747605 4861 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747616 4861 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747626 4861 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747636 4861 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747646 4861 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747657 4861 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747668 4861 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747679 4861 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747689 4861 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747699 4861 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747709 4861 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747720 4861 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747730 4861 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747760 4861 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747771 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747781 4861 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747791 4861 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747801 4861 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747812 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747824 4861 feature_gate.go:330] unrecognized 
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747835 4861 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747845 4861 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747857 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747867 4861 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747880 4861 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747891 4861 feature_gate.go:330] unrecognized feature gate: Example
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747901 4861 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747912 4861 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747922 4861 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747933 4861 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747943 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747953 4861 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747964 4861 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747974 4861 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.747986 4861 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748003 4861 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748017 4861 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748027 4861 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748038 4861 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748048 4861 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748059 4861 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748103 4861 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748117 4861 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748129 4861 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748143 4861 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
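These W-level lines appear because the gate names come from OpenShift's cluster-wide FeatureGate object, while the kubelet's own registry only knows upstream Kubernetes gates. Upstream component-base rejects unknown names with exactly this "unrecognized feature gate" text; the kubelet build here evidently demotes that to a warning and continues. A minimal sketch of the registry behavior, assuming the k8s.io/component-base/featuregate API (the FeatureSpec values are illustrative, not the real kubelet defaults):

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	// Register the gates this binary knows about (a tiny subset here).
	gates := featuregate.NewFeatureGate()
	if err := gates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"ValidatingAdmissionPolicy": {Default: true, PreRelease: featuregate.GA},
		"KMSv1":                     {Default: false, PreRelease: featuregate.Deprecated},
	}); err != nil {
		panic(err)
	}

	// A known gate parses fine; a name the registry has never seen
	// (e.g. the OpenShift-only PinnedImages) is rejected with the same
	// message that shows up above as a warning.
	fmt.Println(gates.Set("KMSv1=true"))        // <nil>
	fmt.Println(gates.Set("PinnedImages=true")) // unrecognized feature gate: PinnedImages
}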
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748157 4861 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748168 4861 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748180 4861 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748194 4861 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748206 4861 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748216 4861 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748227 4861 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748240 4861 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748251 4861 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748262 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.748273 4861 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748502 4861 flags.go:64] FLAG: --address="0.0.0.0"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748529 4861 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748551 4861 flags.go:64] FLAG: --anonymous-auth="true"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748567 4861 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748583 4861 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748595 4861 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748612 4861 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748627 4861 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748640 4861 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748652 4861 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748665 4861 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748682 4861 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748695 4861 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748708 4861 flags.go:64] FLAG: --cgroup-root=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748720 4861 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748732 4861 flags.go:64] FLAG: --client-ca-file=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748744 4861 flags.go:64] FLAG: --cloud-config=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748756 4861 flags.go:64] FLAG: --cloud-provider=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748768 4861 flags.go:64] FLAG: --cluster-dns="[]"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748785 4861 flags.go:64] FLAG: --cluster-domain=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748797 4861 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748811 4861 flags.go:64] FLAG: --config-dir=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748823 4861 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748836 4861 flags.go:64] FLAG: --container-log-max-files="5"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748867 4861 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748880 4861 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748892 4861 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748906 4861 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748919 4861 flags.go:64] FLAG: --contention-profiling="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748932 4861 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748946 4861 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748960 4861 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748975 4861 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.748990 4861 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749003 4861 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749015 4861 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749026 4861 flags.go:64] FLAG: --enable-load-reader="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749039 4861 flags.go:64] FLAG: --enable-server="true"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749051 4861 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749067 4861 flags.go:64] FLAG: --event-burst="100"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749117 4861 flags.go:64] FLAG: --event-qps="50"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749129 4861 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749142 4861 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749154 4861 flags.go:64] FLAG: --eviction-hard=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749171 4861 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749183 4861 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749195 4861 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749211 4861 flags.go:64] FLAG: --eviction-soft=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749224 4861 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749235 4861 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749248 4861 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749259 4861 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749270 4861 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749282 4861 flags.go:64] FLAG: --fail-swap-on="true"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749294 4861 flags.go:64] FLAG: --feature-gates=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749310 4861 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749323 4861 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749337 4861 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749350 4861 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749362 4861 flags.go:64] FLAG: --healthz-port="10248"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749374 4861 flags.go:64] FLAG: --help="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749386 4861 flags.go:64] FLAG: --hostname-override=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749398 4861 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749411 4861 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749423 4861 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749435 4861 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749446 4861 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749458 4861 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749469 4861 flags.go:64] FLAG: --image-service-endpoint=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749480 4861 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749493 4861 flags.go:64] FLAG: --kube-api-burst="100"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749505 4861 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749518 4861 flags.go:64] FLAG: --kube-api-qps="50"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749529 4861 flags.go:64] FLAG: --kube-reserved=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749540 4861 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749552 4861 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749564 4861 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749575 4861 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749587 4861 flags.go:64] FLAG: --lock-file=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749599 4861 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749611 4861 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749624 4861 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749641 4861 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749675 4861 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749688 4861 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749699 4861 flags.go:64] FLAG: --logging-format="text"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749711 4861 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749724 4861 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749736 4861 flags.go:64] FLAG: --manifest-url=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749747 4861 flags.go:64] FLAG: --manifest-url-header=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749763 4861 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749775 4861 flags.go:64] FLAG: --max-open-files="1000000"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749789 4861 flags.go:64] FLAG: --max-pods="110"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749801 4861 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749812 4861 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749825 4861 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749838 4861 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749849 4861 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749861 4861 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749874 4861 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749905 4861 flags.go:64] FLAG: --node-status-max-images="50"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749917 4861 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749929 4861 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749941 4861 flags.go:64] FLAG: --pod-cidr=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749952 4861 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
--pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749969 4861 flags.go:64] FLAG: --pod-manifest-path="" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749981 4861 flags.go:64] FLAG: --pod-max-pids="-1" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.749993 4861 flags.go:64] FLAG: --pods-per-core="0" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750005 4861 flags.go:64] FLAG: --port="10250" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750017 4861 flags.go:64] FLAG: --protect-kernel-defaults="false" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750029 4861 flags.go:64] FLAG: --provider-id="" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750041 4861 flags.go:64] FLAG: --qos-reserved="" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750053 4861 flags.go:64] FLAG: --read-only-port="10255" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750065 4861 flags.go:64] FLAG: --register-node="true" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750114 4861 flags.go:64] FLAG: --register-schedulable="true" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750127 4861 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750147 4861 flags.go:64] FLAG: --registry-burst="10" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750160 4861 flags.go:64] FLAG: --registry-qps="5" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750171 4861 flags.go:64] FLAG: --reserved-cpus="" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750204 4861 flags.go:64] FLAG: --reserved-memory="" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750221 4861 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750235 4861 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750247 4861 flags.go:64] FLAG: --rotate-certificates="false" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750259 4861 flags.go:64] FLAG: --rotate-server-certificates="false" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750271 4861 flags.go:64] FLAG: --runonce="false" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750282 4861 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750296 4861 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750309 4861 flags.go:64] FLAG: --seccomp-default="false" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750321 4861 flags.go:64] FLAG: --serialize-image-pulls="true" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750334 4861 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750347 4861 flags.go:64] FLAG: --storage-driver-db="cadvisor" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750359 4861 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750372 4861 flags.go:64] FLAG: --storage-driver-password="root" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750383 4861 flags.go:64] FLAG: --storage-driver-secure="false" Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750407 4861 flags.go:64] FLAG: --storage-driver-user="root"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750418 4861 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750432 4861 flags.go:64] FLAG: --sync-frequency="1m0s"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750444 4861 flags.go:64] FLAG: --system-cgroups=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750456 4861 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750475 4861 flags.go:64] FLAG: --system-reserved-cgroup=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750487 4861 flags.go:64] FLAG: --tls-cert-file=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750499 4861 flags.go:64] FLAG: --tls-cipher-suites="[]"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750528 4861 flags.go:64] FLAG: --tls-min-version=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750540 4861 flags.go:64] FLAG: --tls-private-key-file=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750552 4861 flags.go:64] FLAG: --topology-manager-policy="none"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750564 4861 flags.go:64] FLAG: --topology-manager-policy-options=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750575 4861 flags.go:64] FLAG: --topology-manager-scope="container"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750588 4861 flags.go:64] FLAG: --v="2"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750605 4861 flags.go:64] FLAG: --version="false"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750620 4861 flags.go:64] FLAG: --vmodule=""
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750634 4861 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.750647 4861 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.750987 4861 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751003 4861 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751031 4861 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751044 4861 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751054 4861 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751064 4861 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751111 4861 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751122 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751133 4861 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751143 4861 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
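The FLAG: lines above are the kubelet's startup dump of every registered command-line flag with its effective value; the flags.go:64 frames come from the component-base helper that walks the flag set via pflag's VisitAll. A standard-library sketch of the same pattern (the two flags here are stand-ins, not kubelet flags):

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Two stand-in flags with defaults matching values seen above.
	flag.Int("v", 2, "log verbosity")
	flag.Int("max-pods", 110, "maximum pods per node")
	flag.Parse()

	// Walk every registered flag and print it in the same
	// FLAG: --name="value" shape as the dump above.
	flag.CommandLine.VisitAll(func(f *flag.Flag) {
		fmt.Printf("FLAG: --%s=%q\n", f.Name, f.Value.String())
	})
}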
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751153 4861 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751164 4861 feature_gate.go:330] unrecognized feature gate: Example
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751175 4861 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751189 4861 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751203 4861 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751214 4861 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751226 4861 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751237 4861 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751249 4861 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751262 4861 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751274 4861 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751287 4861 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751301 4861 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751315 4861 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751329 4861 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751348 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751359 4861 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751369 4861 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751380 4861 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751452 4861 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751464 4861 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751475 4861 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751485 4861 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751495 4861 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751505 4861 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751515 4861 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751525 4861 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751534 4861 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751576 4861 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751587 4861 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751602 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751612 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751622 4861 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751632 4861 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751642 4861 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751652 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751661 4861 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751673 4861 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751682 4861 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751692 4861 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751702 4861 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751712 4861 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751723 4861 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751735 4861 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751745 4861 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751755 4861 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751769 4861 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751786 4861 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751798 4861 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751808 4861 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751818 4861 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751828 4861 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751839 4861 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751849 4861 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751859 4861 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751870 4861 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751880 4861 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751889 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751900 4861 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751909 4861 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.751919 4861 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.751952 4861 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.768256 4861 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.768345 4861 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
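The feature_gate.go:386 summary above is the resolved view after all the warnings: only upstream-known gates remain, with the explicit true/false settings applied. The "Golang settings" record that follows simply echoes GOGC/GOMAXPROCS/GOTRACEBACK from the environment; empty strings mean the Go runtime defaults are in force. A minimal probe of the same values:

package main

import (
	"fmt"
	"os"
	"runtime"
)

func main() {
	// Empty env vars here reproduce the empty fields in the log record.
	fmt.Printf("GOGC=%q GOMAXPROCS=%q GOTRACEBACK=%q\n",
		os.Getenv("GOGC"), os.Getenv("GOMAXPROCS"), os.Getenv("GOTRACEBACK"))
	// With GOMAXPROCS unset, the runtime defaults to the CPU count
	// (12 on this machine, per the cAdvisor inventory further down).
	fmt.Println("effective GOMAXPROCS:", runtime.GOMAXPROCS(0))
}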
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768542 4861 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768564 4861 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768583 4861 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768600 4861 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768615 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768666 4861 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768677 4861 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768687 4861 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768696 4861 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768705 4861 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768716 4861 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768726 4861 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768736 4861 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768745 4861 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768753 4861 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768761 4861 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768769 4861 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768776 4861 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768785 4861 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768793 4861 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768800 4861 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768808 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768816 4861 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768824 4861 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768831 4861 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768840 4861 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768849 4861 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768859 4861 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768868 4861 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768880 4861 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768889 4861 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768900 4861 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768910 4861 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768918 4861 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768927 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768935 4861 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768943 4861 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768951 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768959 4861 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768968 4861 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768975 4861 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768983 4861 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768992 4861 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.768999 4861 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769007 4861 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769015 4861 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769023 4861 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769031 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769039 4861 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769047 4861 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769055 4861 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769063 4861 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769096 4861 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769105 4861 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769116 4861 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769127 4861 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769136 4861 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769145 4861 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769154 4861 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769162 4861 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769171 4861 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769179 4861 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769215 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769223 4861 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769231 4861 feature_gate.go:330] unrecognized feature gate: Example
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769241 4861 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769248 4861 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769256 4861 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769264 4861 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769272 4861 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769280 4861 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.769295 4861 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769575 4861 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769588 4861 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769597 4861 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769607 4861 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769615 4861 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769623 4861 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769632 4861 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769640 4861 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769649 4861 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769658 4861 feature_gate.go:330] unrecognized feature gate: Example
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769669 4861 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769679 4861 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769687 4861 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769695 4861 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769707 4861 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769715 4861 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769723 4861 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769731 4861 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769739 4861 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769747 4861 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769755 4861 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769763 4861 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769770 4861 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769778 4861 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769786 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769794 4861 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769802 4861 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769811 4861 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769820 4861 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769830 4861 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769839 4861 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769849 4861 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769859 4861 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769867 4861 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769876 4861 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769884 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769892 4861 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769900 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769908 4861 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769916 4861 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769924 4861 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769932 4861 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769939 4861 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769947 4861 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769955 4861 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769963 4861 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769970 4861 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769978 4861 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769986 4861 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.769993 4861 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770005 4861 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770015 4861 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770024 4861 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770033 4861 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770041 4861 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770049 4861 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770059 4861 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770069 4861 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770103 4861 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770112 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770120 4861 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770128 4861 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770137 4861 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770145 4861 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770154 4861 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770164 4861 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770172 4861 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770180 4861 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770188 4861 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770196 4861 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 29 06:35:08 crc kubenswrapper[4861]: W0129 06:35:08.770204 4861 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.770219 4861 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.770572 4861 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.776519 4861 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.776659 4861 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.778338 4861 server.go:997] "Starting client certificate rotation"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.778460 4861 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.779678 4861 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-15 13:15:13.546987854 +0000 UTC
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.779801 4861 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.803815 4861 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 29 06:35:08 crc kubenswrapper[4861]: E0129 06:35:08.806369 4861 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.806995 4861 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.824057 4861 log.go:25] "Validated CRI v1 runtime API"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.983632 4861 log.go:25] "Validated CRI v1 image API"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.986329 4861 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.991318 4861 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-29-06-29-34-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Jan 29 06:35:08 crc kubenswrapper[4861]: I0129 06:35:08.991389 4861 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
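The certificate_manager records above show an expiry of 2026-02-24 but a rotation deadline of 2025-12-15: client-go deliberately rotates well before expiry, picking a jittered point late in the validity window (roughly 70 to 90 percent of the way through; the exact fractions below are an assumption for illustration, not the vendored implementation). A sketch of that computation:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline mimics the jittered deadline: a uniform random
// point between 70% and 90% of the way through the cert's validity.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter := time.Date(2026, 2, 24, 5, 52, 8, 0, time.UTC) // expiry from the log
	notBefore := notAfter.AddDate(-1, 0, 0)                   // assumed one-year validity
	fmt.Println("rotate at:", rotationDeadline(notBefore, notAfter))
}

The E-level CSR failure that follows is consistent with this boot sequence: the kubelet tries to rotate immediately, but the API server at api-int.crc.testing:6443 is not up yet, so the request is retried later.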
MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:4242a32a-6d38-415c-93a3-943ed93797ae BootID:dad46bc6-8766-4734-bc4e-ff0764d2ff72 Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:49:7f:c0 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:49:7f:c0 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:15:46:a1 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:4a:53:e3 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:d9:3a:3d Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:ba:86:3f Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:48:fa:72 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:ba:0b:84:b0:a4:af Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:3a:6d:03:ae:54:9d Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 
Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.008986 4861 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.009332 4861 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.012156 4861 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.012341 4861 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.012386 4861 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.012620 4861 topology_manager.go:138] "Creating topology manager with none policy"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.012630 4861 container_manager_linux.go:303] "Creating device plugin manager"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.013698 4861 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.013726 4861 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.013974 4861 state_mem.go:36] "Initialized new in-memory state store"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.014101 4861 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.017201 4861 kubelet.go:418] "Attempting to sync node with API server"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.017228 4861 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.017245 4861 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.017261 4861 kubelet.go:324] "Adding apiserver pod source"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.017275 4861 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.021316 4861 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.023378 4861 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Jan 29 06:35:09 crc kubenswrapper[4861]: W0129 06:35:09.025184 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused
Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.025318 4861 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError"
Jan 29 06:35:09 crc kubenswrapper[4861]: W0129 06:35:09.025286 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused
Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.025413 4861 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.026522 4861 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029330 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029387 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029408 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029426 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029455 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029489 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029509 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029545 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029567 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029586 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029647 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.029668 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.031641 4861 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.032756 4861 server.go:1280] "Started kubelet"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.032970 4861 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.034286 4861 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 29 06:35:09 crc systemd[1]: Started Kubernetes Kubelet.
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.034323 4861 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.038490 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.038619 4861 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.038516 4861 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.039304 4861 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.039339 4861 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.039414 4861 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.039961 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 05:41:28.048431288 +0000 UTC
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.041436 4861 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.042205 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="200ms"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.042275 4861 factory.go:55] Registering systemd factory
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.042963 4861 factory.go:221] Registration of the systemd container factory successfully
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.046446 4861 factory.go:153] Registering CRI-O factory
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.046500 4861 factory.go:221] Registration of the crio container factory successfully
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.046624 4861 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.046660 4861 factory.go:103] Registering Raw factory
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.046696 4861 manager.go:1196] Started watching for new ooms in manager
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.047269 4861 server.go:460] "Adding debug handlers to kubelet server"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.047451 4861 manager.go:319] Starting recovery of all containers
Jan 29 06:35:09 crc kubenswrapper[4861]: W0129 06:35:09.048656 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused
Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.048736 4861 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError"
Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.048124 4861 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.80:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188f20220316d36d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 06:35:09.032698733 +0000 UTC m=+0.704193360,LastTimestamp:2026-01-29 06:35:09.032698733 +0000 UTC m=+0.704193360,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.058968 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059029 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059043 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059057 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059107 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059124 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f"
volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059139 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059153 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059166 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059177 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059190 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059204 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059215 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059233 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059247 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059259 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059274 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059287 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059300 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059318 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059332 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059350 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059365 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059382 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059394 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059405 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059419 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059431 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059442 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059523 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059536 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059549 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059565 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059578 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059592 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059606 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059618 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059633 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059654 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" 
volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059727 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059744 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059759 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059773 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059790 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059804 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059817 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059832 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059846 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059866 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059879 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059893 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059907 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059931 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059944 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059960 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059976 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.059989 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060004 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060016 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060029 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060041 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" 
volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060053 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060080 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060094 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060108 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060121 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060136 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060148 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060161 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060173 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060185 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060198 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060210 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060222 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060260 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060271 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060284 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060298 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060310 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060321 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060338 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060352 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060364 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" 
seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060376 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060388 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060398 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060412 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060426 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060440 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060452 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060466 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060477 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060488 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060503 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 29 06:35:09 crc 
kubenswrapper[4861]: I0129 06:35:09.060517 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060533 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060549 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060564 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060578 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060594 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.060606 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062276 4861 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062302 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062319 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062334 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062356 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062369 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062382 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062396 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062411 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062422 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062435 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062448 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062462 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062476 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062489 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062504 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062515 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062550 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062563 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062576 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062591 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062605 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062618 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062633 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062646 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062663 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" 
volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062676 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062694 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062710 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062727 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062740 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062752 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062764 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062778 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062800 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062820 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062833 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" 
volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062846 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062858 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062870 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062882 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062896 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.062911 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.064787 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.064857 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.064903 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.064931 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.064957 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.064995 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065022 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065059 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065125 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065159 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065193 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065222 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065256 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065293 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065319 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065354 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" 
volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065377 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065410 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065434 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065460 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065494 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065517 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065568 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065593 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065616 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065645 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065670 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065709 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065738 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065766 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065807 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065837 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065866 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065906 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065934 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.065972 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066005 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066038 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066108 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066145 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066186 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066216 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066245 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066286 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066315 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066352 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066381 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066423 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066461 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066489 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066547 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066578 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066608 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066653 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066683 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066721 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066750 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066783 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066826 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066859 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066891 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066929 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.066966 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.067008 4861 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.067035 4861 reconstruct.go:97] "Volume reconstruction finished" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.067053 4861 reconciler.go:26] "Reconciler: start to sync state" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.075583 4861 manager.go:324] Recovery completed Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.090309 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.094762 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.094837 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.094854 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.096317 4861 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.096396 4861 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.096438 4861 state_mem.go:36] "Initialized new in-memory state store" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.111288 4861 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.115095 4861 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.115156 4861 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.115194 4861 kubelet.go:2335] "Starting kubelet main sync loop" Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.115255 4861 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 29 06:35:09 crc kubenswrapper[4861]: W0129 06:35:09.117224 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.117304 4861 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.122014 4861 policy_none.go:49] "None policy: Start" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.123434 4861 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.123473 4861 state_mem.go:35] "Initializing new in-memory state store" Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.139656 4861 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.190413 4861 manager.go:334] "Starting Device Plugin manager" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.190511 4861 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.190535 4861 server.go:79] "Starting device plugin registration server" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.193769 4861 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.193799 4861 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.194334 4861 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.194430 4861 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.194441 4861 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.203306 4861 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.215586 4861 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Jan 29 06:35:09 crc kubenswrapper[4861]: 
I0129 06:35:09.215733 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.216965 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.217015 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.217030 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.217284 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.217525 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.217630 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.218484 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.218546 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.218558 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.218767 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.218790 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.218796 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.218810 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.218945 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.218991 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.220052 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.220113 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.220132 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.220140 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.220161 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.220172 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.220295 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.220484 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.220545 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.221322 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.221362 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.221379 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.221540 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.221656 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.221694 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.221919 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.221948 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.221962 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.224591 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.224626 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.224666 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.224908 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.224964 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.224980 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.225404 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.225493 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.226694 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.226751 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.226766 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.243202 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="400ms" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269052 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269162 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269188 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269211 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269234 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269256 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269273 
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269052 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269162 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269188 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269211 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269234 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269256 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269273 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269298 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.269375 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.270250 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.270307 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.270338 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.270364 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.270386 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.270419 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.294891 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.296744 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.296793 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.296806 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.296840 4861 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.297493 4861 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.80:6443: connect: connection refused" node="crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375013 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375110 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375152 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375185 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375219 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375256 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375290 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375328 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375367 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375376 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375425 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375477 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375481 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375401 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375539 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375493 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375569 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375565 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375588 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375600 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375426 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375541 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375549 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375628 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375664 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375716 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375738 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375780 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375826 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.375876 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.498059 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.499973 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.500036 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.500055 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.500140 4861 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.500836 4861 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.80:6443: connect: connection refused" node="crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.553010 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.563301 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.585858 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: W0129 06:35:09.612536 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-2d6c5aa05e57324be307c899911a5da192befff46df672f49f560862bada836d WatchSource:0}: Error finding container 2d6c5aa05e57324be307c899911a5da192befff46df672f49f560862bada836d: Status 404 returned error can't find the container with id 2d6c5aa05e57324be307c899911a5da192befff46df672f49f560862bada836d
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.614896 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: W0129 06:35:09.616270 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-07e9fa6bb0f615c8908f771178bd762d115914e25ce7c3d485973254d86fb7f5 WatchSource:0}: Error finding container 07e9fa6bb0f615c8908f771178bd762d115914e25ce7c3d485973254d86fb7f5: Status 404 returned error can't find the container with id 07e9fa6bb0f615c8908f771178bd762d115914e25ce7c3d485973254d86fb7f5
Jan 29 06:35:09 crc kubenswrapper[4861]: W0129 06:35:09.617719 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-e4b6952b5feff1e43f78ad0659866788f03139c561009a8d95cecd20fc457b2c WatchSource:0}: Error finding container e4b6952b5feff1e43f78ad0659866788f03139c561009a8d95cecd20fc457b2c: Status 404 returned error can't find the container with id e4b6952b5feff1e43f78ad0659866788f03139c561009a8d95cecd20fc457b2c
Jan 29 06:35:09 crc kubenswrapper[4861]: I0129 06:35:09.625357 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Jan 29 06:35:09 crc kubenswrapper[4861]: W0129 06:35:09.630043 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-06c382d1b146b74b14a095c392ebb7edf55e78c5bf682663dcb25135c73aea61 WatchSource:0}: Error finding container 06c382d1b146b74b14a095c392ebb7edf55e78c5bf682663dcb25135c73aea61: Status 404 returned error can't find the container with id 06c382d1b146b74b14a095c392ebb7edf55e78c5bf682663dcb25135c73aea61
Jan 29 06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.644143 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="800ms"
06:35:09 crc kubenswrapper[4861]: E0129 06:35:09.904107 4861 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.80:6443: connect: connection refused" node="crc" Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.034290 4861 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.041345 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 18:51:51.003492201 +0000 UTC Jan 29 06:35:10 crc kubenswrapper[4861]: W0129 06:35:10.083379 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Jan 29 06:35:10 crc kubenswrapper[4861]: E0129 06:35:10.083515 4861 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.124167 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b0644f626fabc5ceda8affd92bcd97b05d3beee3d5588e1cc72c66d4571750a1"} Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.125981 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"06c382d1b146b74b14a095c392ebb7edf55e78c5bf682663dcb25135c73aea61"} Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.127959 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"07e9fa6bb0f615c8908f771178bd762d115914e25ce7c3d485973254d86fb7f5"} Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.129997 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e4b6952b5feff1e43f78ad0659866788f03139c561009a8d95cecd20fc457b2c"} Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.131648 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2d6c5aa05e57324be307c899911a5da192befff46df672f49f560862bada836d"} Jan 29 06:35:10 crc kubenswrapper[4861]: W0129 06:35:10.398940 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Jan 29 06:35:10 crc kubenswrapper[4861]: E0129 06:35:10.399566 4861 reflector.go:158] 
"Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Jan 29 06:35:10 crc kubenswrapper[4861]: W0129 06:35:10.399184 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Jan 29 06:35:10 crc kubenswrapper[4861]: E0129 06:35:10.399639 4861 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Jan 29 06:35:10 crc kubenswrapper[4861]: E0129 06:35:10.445044 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="1.6s" Jan 29 06:35:10 crc kubenswrapper[4861]: W0129 06:35:10.701882 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Jan 29 06:35:10 crc kubenswrapper[4861]: E0129 06:35:10.702033 4861 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.704733 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.707314 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.707382 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.707403 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.707448 4861 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 06:35:10 crc kubenswrapper[4861]: E0129 06:35:10.708045 4861 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.80:6443: connect: connection refused" node="crc" Jan 29 06:35:10 crc kubenswrapper[4861]: I0129 06:35:10.951379 4861 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 29 06:35:10 crc kubenswrapper[4861]: E0129 06:35:10.953350 4861 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed 
certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.034281 4861 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.042461 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 12:16:15.49787457 +0000 UTC Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.140103 4861 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="36c6928ff7a7225f146dbfb1c19b90de69497499e33eb6dfe5b739503f8cd9a7" exitCode=0 Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.140215 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"36c6928ff7a7225f146dbfb1c19b90de69497499e33eb6dfe5b739503f8cd9a7"} Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.140333 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.142270 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.142330 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.142348 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.143065 4861 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="6b21d63e1683dcdd8f31ab08fb5ece2680c36d64e40f339a74e614146ceb297b" exitCode=0 Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.143161 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"6b21d63e1683dcdd8f31ab08fb5ece2680c36d64e40f339a74e614146ceb297b"} Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.143287 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.145187 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.145246 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.145291 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.148204 4861 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c" exitCode=0 Jan 29 
06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.148331 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.148327 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c"} Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.149680 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.149718 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.149731 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.152345 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc"} Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.152386 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81"} Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.152402 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0"} Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.154593 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860" exitCode=0 Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.154769 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860"} Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.154972 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.156225 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.156281 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.156301 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.162331 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.163694 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.163775 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:11 crc kubenswrapper[4861]: I0129 06:35:11.163789 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.034254 4861 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.042828 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 05:39:26.049973634 +0000 UTC Jan 29 06:35:12 crc kubenswrapper[4861]: E0129 06:35:12.046317 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="3.2s" Jan 29 06:35:12 crc kubenswrapper[4861]: W0129 06:35:12.057911 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Jan 29 06:35:12 crc kubenswrapper[4861]: E0129 06:35:12.057992 4861 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.161341 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381"} Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.161403 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f"} Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.161418 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd"} Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.161431 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39"} Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.163808 4861 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="634fa5f7681478f2a6e2efb1967bcc201c9b13cdf923965df5b74e8b697eacd7" exitCode=0 Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.163868 4861 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"634fa5f7681478f2a6e2efb1967bcc201c9b13cdf923965df5b74e8b697eacd7"} Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.164021 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.165306 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.165335 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.165347 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.167859 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"44f4b84198cb878a248b3daa7c766de056644c4308280a03f6ebb1d9221358d7"} Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.167963 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.169090 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.169119 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.169132 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.173600 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.174036 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4"} Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.174093 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc"} Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.174110 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11"} Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.175443 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.175482 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.175495 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.179409 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750"} Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.179526 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.180621 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.180649 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.180663 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.308311 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:12 crc kubenswrapper[4861]: W0129 06:35:12.308479 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.80:6443: connect: connection refused Jan 29 06:35:12 crc kubenswrapper[4861]: E0129 06:35:12.308617 4861 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.80:6443: connect: connection refused" logger="UnhandledError" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.310186 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.310236 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.310248 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.310274 4861 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 06:35:12 crc kubenswrapper[4861]: E0129 06:35:12.310977 4861 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.80:6443: connect: connection refused" node="crc" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.646259 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:35:12 crc kubenswrapper[4861]: I0129 06:35:12.655762 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.043370 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 14:19:00.910120875 +0000 UTC Jan 29 06:35:13 crc kubenswrapper[4861]: 
I0129 06:35:13.187177 4861 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="d19dfe15890316ee84f57f389b2a5eff2fe1979e21e9d1623fe8913ed2be607e" exitCode=0 Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.187270 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"d19dfe15890316ee84f57f389b2a5eff2fe1979e21e9d1623fe8913ed2be607e"} Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.187367 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.188608 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.188655 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.188670 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.192782 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515"} Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.192894 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.192947 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.193316 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.193340 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.194180 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.197746 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.197791 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.197826 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.197868 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.197877 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.197888 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.197905 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.197906 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.197971 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.198005 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.197800 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:13 crc kubenswrapper[4861]: I0129 06:35:13.198121 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.044419 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 09:39:17.87347736 +0000 UTC Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.205251 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.205305 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.205368 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.205448 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.205490 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.206965 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"022c796f3b0d3339775825d25b2298001519876127359599ff4a4efd7e72dca2"} Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.207023 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"5d5503e9ee66c7ef8b6927d6885a08599e816cf2aa288135d73bb040faa966e0"} Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.207045 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"521e395aa78a3a709c497886a881f879e5c8a7394e741e15d593d40378e668f9"} Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.207525 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.207596 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.207614 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.207607 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.207677 
4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.207696 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.207982 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.208017 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.208035 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:14 crc kubenswrapper[4861]: I0129 06:35:14.383127 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.044720 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 23:07:17.155298325 +0000 UTC Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.192032 4861 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.218237 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.218876 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7c8a7e09738a804da41cbf174c6525872146b65a17b3a7fecf64b7cb946da001"} Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.219027 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9a3809322389c905c4ecfb51c6ff48a202f4c97396b4252ea8be4b593caad919"} Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.218942 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.220314 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.220372 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.220391 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.220656 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.220706 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.220725 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.511798 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:15 crc 
kubenswrapper[4861]: I0129 06:35:15.514319 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.514392 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.514412 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.514460 4861 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.550724 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.551032 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.553037 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.553132 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.553152 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.769794 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:35:15 crc kubenswrapper[4861]: I0129 06:35:15.959741 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 29 06:35:16 crc kubenswrapper[4861]: I0129 06:35:16.045004 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 13:43:21.30372405 +0000 UTC Jan 29 06:35:16 crc kubenswrapper[4861]: I0129 06:35:16.226306 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:16 crc kubenswrapper[4861]: I0129 06:35:16.226387 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:16 crc kubenswrapper[4861]: I0129 06:35:16.228208 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:16 crc kubenswrapper[4861]: I0129 06:35:16.228260 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:16 crc kubenswrapper[4861]: I0129 06:35:16.228290 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:16 crc kubenswrapper[4861]: I0129 06:35:16.228341 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:16 crc kubenswrapper[4861]: I0129 06:35:16.228381 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:16 crc kubenswrapper[4861]: I0129 06:35:16.228399 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:17 crc kubenswrapper[4861]: I0129 06:35:17.045571 4861 
certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 04:01:46.677426513 +0000 UTC Jan 29 06:35:17 crc kubenswrapper[4861]: I0129 06:35:17.189944 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 29 06:35:17 crc kubenswrapper[4861]: I0129 06:35:17.229211 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:17 crc kubenswrapper[4861]: I0129 06:35:17.230722 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:17 crc kubenswrapper[4861]: I0129 06:35:17.230770 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:17 crc kubenswrapper[4861]: I0129 06:35:17.230783 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:17 crc kubenswrapper[4861]: I0129 06:35:17.691150 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:35:17 crc kubenswrapper[4861]: I0129 06:35:17.691450 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:17 crc kubenswrapper[4861]: I0129 06:35:17.693281 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:17 crc kubenswrapper[4861]: I0129 06:35:17.693359 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:17 crc kubenswrapper[4861]: I0129 06:35:17.693379 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:18 crc kubenswrapper[4861]: I0129 06:35:18.046587 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 00:50:32.043341225 +0000 UTC Jan 29 06:35:18 crc kubenswrapper[4861]: I0129 06:35:18.232420 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:18 crc kubenswrapper[4861]: I0129 06:35:18.234115 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:18 crc kubenswrapper[4861]: I0129 06:35:18.234181 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:18 crc kubenswrapper[4861]: I0129 06:35:18.234202 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:19 crc kubenswrapper[4861]: I0129 06:35:19.047047 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 11:07:00.774162426 +0000 UTC Jan 29 06:35:19 crc kubenswrapper[4861]: E0129 06:35:19.203622 4861 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 29 06:35:20 crc kubenswrapper[4861]: I0129 06:35:20.048055 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 07:49:30.320419381 +0000 UTC Jan 29 06:35:20 crc kubenswrapper[4861]: I0129 
06:35:20.610229 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:35:20 crc kubenswrapper[4861]: I0129 06:35:20.610519 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:20 crc kubenswrapper[4861]: I0129 06:35:20.613649 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:20 crc kubenswrapper[4861]: I0129 06:35:20.613846 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:20 crc kubenswrapper[4861]: I0129 06:35:20.613875 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:20 crc kubenswrapper[4861]: I0129 06:35:20.626975 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:35:21 crc kubenswrapper[4861]: I0129 06:35:21.048755 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 17:21:13.056591417 +0000 UTC Jan 29 06:35:21 crc kubenswrapper[4861]: I0129 06:35:21.240163 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:21 crc kubenswrapper[4861]: I0129 06:35:21.241732 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:21 crc kubenswrapper[4861]: I0129 06:35:21.241773 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:21 crc kubenswrapper[4861]: I0129 06:35:21.241786 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:21 crc kubenswrapper[4861]: I0129 06:35:21.859525 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:35:22 crc kubenswrapper[4861]: I0129 06:35:22.049432 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 03:35:33.471482075 +0000 UTC Jan 29 06:35:22 crc kubenswrapper[4861]: I0129 06:35:22.243323 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:22 crc kubenswrapper[4861]: I0129 06:35:22.244654 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:22 crc kubenswrapper[4861]: I0129 06:35:22.244778 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:22 crc kubenswrapper[4861]: I0129 06:35:22.244799 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:22 crc kubenswrapper[4861]: W0129 06:35:22.662255 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 29 06:35:22 crc kubenswrapper[4861]: I0129 06:35:22.662398 4861 trace.go:236] Trace[762762459]: "Reflector ListAndWatch" 
name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 06:35:12.660) (total time: 10002ms): Jan 29 06:35:22 crc kubenswrapper[4861]: Trace[762762459]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10002ms (06:35:22.662) Jan 29 06:35:22 crc kubenswrapper[4861]: Trace[762762459]: [10.002293312s] [10.002293312s] END Jan 29 06:35:22 crc kubenswrapper[4861]: E0129 06:35:22.662432 4861 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 29 06:35:23 crc kubenswrapper[4861]: I0129 06:35:23.037968 4861 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 29 06:35:23 crc kubenswrapper[4861]: I0129 06:35:23.050422 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 00:50:02.079742967 +0000 UTC Jan 29 06:35:23 crc kubenswrapper[4861]: W0129 06:35:23.271919 4861 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 29 06:35:23 crc kubenswrapper[4861]: I0129 06:35:23.272067 4861 trace.go:236] Trace[1864983371]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 06:35:13.270) (total time: 10001ms): Jan 29 06:35:23 crc kubenswrapper[4861]: Trace[1864983371]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (06:35:23.271) Jan 29 06:35:23 crc kubenswrapper[4861]: Trace[1864983371]: [10.001483441s] [10.001483441s] END Jan 29 06:35:23 crc kubenswrapper[4861]: E0129 06:35:23.272150 4861 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 29 06:35:24 crc kubenswrapper[4861]: I0129 06:35:24.051029 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 20:02:17.893719638 +0000 UTC Jan 29 06:35:24 crc kubenswrapper[4861]: I0129 06:35:24.068314 4861 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 29 06:35:24 crc kubenswrapper[4861]: I0129 06:35:24.068451 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 29 06:35:24 crc kubenswrapper[4861]: I0129 06:35:24.075823 4861 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 29 06:35:24 crc kubenswrapper[4861]: I0129 06:35:24.075921 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 29 06:35:24 crc kubenswrapper[4861]: I0129 06:35:24.860215 4861 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 06:35:24 crc kubenswrapper[4861]: I0129 06:35:24.860337 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 29 06:35:25 crc kubenswrapper[4861]: I0129 06:35:25.051546 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 04:39:49.664674144 +0000 UTC Jan 29 06:35:25 crc kubenswrapper[4861]: I0129 06:35:25.777247 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:35:25 crc kubenswrapper[4861]: I0129 06:35:25.777606 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:25 crc kubenswrapper[4861]: I0129 06:35:25.779712 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:25 crc kubenswrapper[4861]: I0129 06:35:25.779771 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:25 crc kubenswrapper[4861]: I0129 06:35:25.779795 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:25 crc kubenswrapper[4861]: I0129 06:35:25.785788 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.003212 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.003556 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.005782 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 
29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.005859 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.005879 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.025444 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.051687 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 21:42:12.321268576 +0000 UTC Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.257446 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.257509 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.257652 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.259549 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.259578 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.259639 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.259662 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.259643 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.259787 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:26 crc kubenswrapper[4861]: I0129 06:35:26.823460 4861 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 29 06:35:27 crc kubenswrapper[4861]: I0129 06:35:27.051988 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 13:03:35.340182561 +0000 UTC Jan 29 06:35:27 crc kubenswrapper[4861]: I0129 06:35:27.897366 4861 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.028677 4861 apiserver.go:52] "Watching apiserver" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.053206 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 18:33:39.575794059 +0000 UTC Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.410769 4861 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.411238 4861 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"] Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.411755 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.411958 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.412000 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.412043 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 06:35:28 crc kubenswrapper[4861]: E0129 06:35:28.412137 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.411996 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:28 crc kubenswrapper[4861]: E0129 06:35:28.412211 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.412316 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 06:35:28 crc kubenswrapper[4861]: E0129 06:35:28.412400 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.421003 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.421035 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.421123 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.421321 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.421444 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.422109 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.422402 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.422609 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.423498 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.442546 4861 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.455306 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.475050 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.491729 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.521531 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.535481 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.565867 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.582983 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.594285 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.607225 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.618950 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:28 crc kubenswrapper[4861]: I0129 06:35:28.629708 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.053894 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 21:18:52.389344924 +0000 UTC Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.054628 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.059390 4861 trace.go:236] Trace[349371746]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 06:35:15.668) (total time: 13391ms): Jan 29 06:35:29 crc kubenswrapper[4861]: Trace[349371746]: ---"Objects listed" error: 13391ms (06:35:29.059) Jan 29 06:35:29 crc kubenswrapper[4861]: Trace[349371746]: [13.391081556s] [13.391081556s] END Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.059609 4861 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.059561 4861 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.059679 4861 trace.go:236] Trace[1726960794]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 06:35:15.967) (total time: 13091ms): Jan 29 06:35:29 crc kubenswrapper[4861]: Trace[1726960794]: ---"Objects listed" error: 13091ms (06:35:29.059) Jan 29 06:35:29 crc kubenswrapper[4861]: Trace[1726960794]: [13.091639513s] [13.091639513s] END Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.059892 4861 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.062751 4861 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.084145 4861 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.124851 4861 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get 
\"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:60276->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.124970 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:60276->192.168.126.11:17697: read: connection reset by peer" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.124890 4861 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:60278->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.125353 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:60278->192.168.126.11:17697: read: connection reset by peer" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.126040 4861 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.126128 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.139439 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod 
was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.151478 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161142 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161183 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161209 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161235 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161258 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161283 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161301 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161320 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161340 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161360 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161378 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161400 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161423 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161442 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161463 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161483 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161505 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161607 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161662 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161607 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161689 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161713 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161740 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161763 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161783 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161814 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161844 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161865 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161870 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161896 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161926 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161947 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161967 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161988 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162010 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162056 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162091 4861 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162119 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162141 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162164 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162190 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162224 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162243 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162266 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162287 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162307 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162326 
4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162346 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162364 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162383 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162405 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162427 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162448 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162465 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162484 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161861 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: 
"b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161843 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.161932 4861 csr.go:261] certificate signing request csr-fhx76 is approved, waiting to be issued Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162140 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162186 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162358 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162396 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.162512 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:35:29.662485682 +0000 UTC m=+21.333980439 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.166723 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.166801 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162509 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162761 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162770 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.162836 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.163030 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.163048 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.163060 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.163341 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.163515 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.163563 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.163596 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.163724 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.163745 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). 
InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.163939 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.163949 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.164020 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.164165 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.164190 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.164186 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.164361 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.164771 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.164779 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.164803 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.164803 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.165000 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.165022 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.165109 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.166253 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). 
InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.166449 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.166856 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.167029 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.167171 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.167669 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.166784 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.167991 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168096 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168175 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168249 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168331 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168400 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168468 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168532 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168603 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") 
pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168675 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168742 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168805 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168870 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168933 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169013 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169102 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169179 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.168345 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169013 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169281 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169237 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169249 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169387 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169392 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169413 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169439 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169461 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169483 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169479 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169504 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169622 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169643 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169670 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169713 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169757 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169794 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169830 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169877 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169919 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169970 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170010 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170054 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: 
\"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170116 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170158 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170193 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170232 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170293 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170334 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170375 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170411 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170438 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170466 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: 
\"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170492 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170522 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170551 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170583 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170629 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170662 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170691 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170718 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170744 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170782 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod 
\"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170818 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170846 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170874 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170901 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170929 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170960 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170992 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171022 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171051 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171108 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171137 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171170 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171199 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171224 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171259 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171293 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171329 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.174996 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175054 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175124 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175167 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175192 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175222 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175245 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175268 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175288 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175313 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175339 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175361 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175392 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: 
\"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175418 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175443 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175469 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175504 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175539 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175573 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175604 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175638 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175666 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 
06:35:29.175699 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175739 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175774 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175809 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175843 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175876 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175910 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175944 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175980 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176019 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 
06:35:29.176062 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176120 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176152 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176183 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176226 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176261 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176298 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176331 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176362 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176394 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176427 4861 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176459 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176500 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176533 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176572 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176606 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176642 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176702 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176740 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176773 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 
06:35:29.176804 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176834 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176864 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176900 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176930 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176967 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176997 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177030 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177067 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177130 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 
06:35:29.177173 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177211 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177251 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177286 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177326 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177363 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177404 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177439 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177513 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177563 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: 
\"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.169834 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177607 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170316 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170676 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.170912 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171170 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171214 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171522 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.171638 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.172285 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.172627 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.172673 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.172673 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.172725 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177654 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177809 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177848 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177884 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177916 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177945 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177976 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178001 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178029 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" 
(UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178062 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178148 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178268 4861 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178306 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178323 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178340 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178354 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178369 4861 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178385 4861 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178399 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178416 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 29 
06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178430 4861 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178445 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178460 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178475 4861 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178488 4861 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178501 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178514 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178527 4861 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178542 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178556 4861 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178570 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178585 4861 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178600 4861 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") 
on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178616 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178630 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178644 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178658 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178672 4861 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178685 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178698 4861 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178710 4861 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178723 4861 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178737 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178754 4861 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178780 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178793 4861 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath 
\"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178807 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178820 4861 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178833 4861 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178851 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178867 4861 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178888 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178909 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178927 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178946 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178964 4861 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178981 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178997 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179015 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc 
kubenswrapper[4861]: I0129 06:35:29.179032 4861 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179048 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179066 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179112 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179131 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179148 4861 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179164 4861 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179181 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179203 4861 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179221 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179237 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179255 4861 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179273 4861 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath 
\"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179289 4861 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179309 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.184845 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.172864 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.174657 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.174912 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.174923 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175385 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.175979 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.176493 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177172 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177248 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177273 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177589 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177583 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.177997 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.186623 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178464 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178599 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.178799 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). 
InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179064 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179399 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.179406 4861 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179772 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.179897 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.180186 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.180543 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.180772 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.182726 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.182998 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.183144 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.183271 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.183478 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.183479 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.183497 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.183708 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.183793 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.183931 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.183945 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.184065 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.184434 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.184501 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.184532 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.184945 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.184998 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.182699 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.185486 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.185544 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.185616 4861 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.185817 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.186630 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.186947 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.187063 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:29.687040775 +0000 UTC m=+21.358535332 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.187132 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.187335 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.187383 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.187560 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.187626 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.187683 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.187766 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.188107 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.188202 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.188121 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.186888 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.188255 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.188414 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). 
InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.188458 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.188517 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.188553 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.188747 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.188941 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.189548 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.189994 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.190521 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.190665 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.191367 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.192236 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.192469 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.192562 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.192604 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.192874 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.192848 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.192923 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.193401 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:29.69336598 +0000 UTC m=+21.364860577 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.193472 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.193581 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.193741 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.194675 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.196695 4861 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.197890 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.197915 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.186590 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.198571 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.198882 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.199270 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.199300 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.200254 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.200417 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.200767 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.201034 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.201193 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.201456 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.202208 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.202889 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). 
InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.203432 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.203576 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.205213 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.205447 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.207606 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.210352 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.210764 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.210798 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.210815 4861 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.210876 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.210912 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:29.710885971 +0000 UTC m=+21.382380528 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.213428 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.213457 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.213474 4861 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.213525 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:29.713509955 +0000 UTC m=+21.385004522 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.213919 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.213943 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.216532 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.217727 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.217846 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.219269 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.219344 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.228189 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.229585 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.239204 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.240378 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.251607 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.279884 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.279924 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.279994 4861 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280007 4861 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280018 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280030 4861 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280043 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280056 4861 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280087 4861 reconciler_common.go:293] "Volume detached for 
volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280097 4861 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280106 4861 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280153 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280169 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280182 4861 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280182 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280206 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280222 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280236 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280248 4861 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280260 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280170 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: 
\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280273 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280341 4861 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280353 4861 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280364 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280377 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280389 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280400 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280411 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280458 4861 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280472 4861 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280484 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280495 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280506 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: 
\"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280515 4861 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280526 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280536 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280547 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280557 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280566 4861 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280576 4861 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280587 4861 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280596 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280606 4861 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280615 4861 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280624 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280634 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: 
\"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280643 4861 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280654 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280663 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280672 4861 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280681 4861 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280691 4861 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280701 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280712 4861 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280721 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280730 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280738 4861 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280747 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280756 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") 
on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280766 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280776 4861 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280786 4861 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280795 4861 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280805 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280815 4861 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280826 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280835 4861 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280844 4861 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280852 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280861 4861 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280871 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280879 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280898 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280908 4861 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280917 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280926 4861 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280935 4861 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280944 4861 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280954 4861 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280964 4861 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280973 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280981 4861 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.280993 4861 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281002 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281010 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281020 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281029 4861 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281039 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281048 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281057 4861 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281066 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281098 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281107 4861 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281116 4861 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281125 4861 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281134 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281143 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281152 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281167 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281176 4861 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281185 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281194 4861 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281203 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281212 4861 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281220 4861 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281229 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281241 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281250 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.281260 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.341399 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.685898 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.686133 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:35:30.686113208 +0000 UTC m=+22.357607765 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.787628 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.787692 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.787715 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:29 crc kubenswrapper[4861]: I0129 06:35:29.787733 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.787882 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.787923 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered 
Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.787936 4861 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.787984 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:30.787969651 +0000 UTC m=+22.459464208 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.787975 4861 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.788040 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.788119 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.788136 4861 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.788193 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:30.788150696 +0000 UTC m=+22.459645283 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.788217 4861 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.788235 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2026-01-29 06:35:30.788218687 +0000 UTC m=+22.459713284 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:29 crc kubenswrapper[4861]: E0129 06:35:29.788422 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:30.788394622 +0000 UTC m=+22.459889179 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.054908 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 09:28:30.79736349 +0000 UTC Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.115465 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.115616 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.115623 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.115835 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.115965 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.116244 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
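Note: the "No retries permitted until ... (durationBeforeRetry 1s)" entries show kubelet's per-volume exponential backoff: the first failure is requeued after 1s, and the same operations further down are requeued after 2s. Only the 1s -> 2s doubling is visible in this log; the sketch below assumes the commonly cited kubelet defaults of a ~500ms initial delay and a cap on the order of two minutes, so treat those two constants as assumptions:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed defaults; only the 1s -> 2s progression appears in the log.
	delay := 500 * time.Millisecond
	const maxDelay = 2*time.Minute + 2*time.Second
	for i := 1; i <= 10; i++ {
		fmt.Printf("retry %d: durationBeforeRetry %v\n", i, delay)
		delay *= 2 // doubling, as seen between the 06:35:29 and 06:35:30 failures
		if delay > maxDelay {
			delay = maxDelay // backoff stops growing at the cap
		}
	}
}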
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.128810 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.129371 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.129428 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.129447 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.129590 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.129730 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.129975 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.130165 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.130219 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.130285 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.130400 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.130492 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.130528 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.130794 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.131000 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.131063 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.131416 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.131659 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.133380 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.133553 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.135920 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.136236 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.139979 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.149389 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.154155 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.173046 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193482 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193574 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193602 4861 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193627 4861 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193685 4861 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193706 4861 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193727 4861 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193780 4861 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193800 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193853 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193874 4861 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193896 4861 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193949 4861 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193969 4861 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.193989 4861 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.194037 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.194057 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.194119 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.194139 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.194158 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.194209 4861 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.194228 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.194248 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.194299 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.258221 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.273766 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.531536 4861 csr.go:257] certificate signing request csr-fhx76 is issued Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.699738 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.700527 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:35:32.700461703 +0000 UTC m=+24.371956310 (durationBeforeRetry 2s). 
Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.801449 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.801968 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.802241 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.801747 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 29 06:35:30 crc kubenswrapper[4861]: I0129 06:35:30.802437 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.802540 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.802617 4861 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.802134 4861 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.802735 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:32.802693705 +0000 UTC m=+24.474188442 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.802796 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:32.802760007 +0000 UTC m=+24.474254764 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.802399 4861 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.802879 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:32.802858739 +0000 UTC m=+24.474353336 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.803343 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.803398 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.803415 4861 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 06:35:30 crc kubenswrapper[4861]: E0129 06:35:30.803497 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:32.803472634 +0000 UTC m=+24.474967201 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 06:35:30 crc kubenswrapper[4861]: W0129 06:35:30.992344 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-743cae866618a5ebee983b422cb9f4796742849afea1fa393a27f77d249fdba4 WatchSource:0}: Error finding container 743cae866618a5ebee983b422cb9f4796742849afea1fa393a27f77d249fdba4: Status 404 returned error can't find the container with id 743cae866618a5ebee983b422cb9f4796742849afea1fa393a27f77d249fdba4
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.055864 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 10:57:48.894802288 +0000 UTC
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.102857 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-2kbk8"]
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.103246 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-2kbk8"
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.106528 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.107953 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.107998 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.120307 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes"
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.121310 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.122056 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes"
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.122695 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.123270 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes"
Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.123754 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes"
Jan 29
06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.124345 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.124986 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.125660 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.126200 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.126754 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.128358 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.129322 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.129815 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.130785 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.131312 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.132197 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.132742 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.133176 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.134097 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.134715 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.135223 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.136188 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" 
path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.136624 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.137620 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.138072 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.139266 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.139876 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.140736 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.141048 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.141356 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.141854 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.142802 4861 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.142940 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.144586 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.145531 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.145936 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.147595 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.148647 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.149215 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 29 06:35:31 crc 
kubenswrapper[4861]: I0129 06:35:31.150208 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.150854 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.151396 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.151380 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.152376 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 29 06:35:31 crc 
kubenswrapper[4861]: I0129 06:35:31.153393 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.154018 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.154840 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.155431 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.156304 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.157041 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.158111 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.158605 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.159118 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.159943 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.160517 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.161430 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.162263 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.177924 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.197100 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.208507 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/ff129b34-bccd-4a2c-b1c2-75a8a78e1715-hosts-file\") pod \"node-resolver-2kbk8\" (UID: \"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\") " pod="openshift-dns/node-resolver-2kbk8" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.208570 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kvjx\" (UniqueName: \"kubernetes.io/projected/ff129b34-bccd-4a2c-b1c2-75a8a78e1715-kube-api-access-9kvjx\") pod \"node-resolver-2kbk8\" (UID: \"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\") " pod="openshift-dns/node-resolver-2kbk8" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.209176 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.270865 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.273436 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515" exitCode=255 Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.273527 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515"} Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.274748 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d255ccea654a29f9c7507cf106ba2ec5c27db8dff1aa07c8709488e1763f5564"} Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.275679 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"743cae866618a5ebee983b422cb9f4796742849afea1fa393a27f77d249fdba4"} Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.279256 
4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"b4b072e38c93d55334efd7370173791aeedf9787092f0296994adfb8b3c992cb"} Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.292779 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.309213 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kvjx\" (UniqueName: \"kubernetes.io/projected/ff129b34-bccd-4a2c-b1c2-75a8a78e1715-kube-api-access-9kvjx\") pod \"node-resolver-2kbk8\" (UID: \"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\") " pod="openshift-dns/node-resolver-2kbk8" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.309287 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/ff129b34-bccd-4a2c-b1c2-75a8a78e1715-hosts-file\") pod \"node-resolver-2kbk8\" (UID: \"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\") " pod="openshift-dns/node-resolver-2kbk8" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.309384 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: 
\"kubernetes.io/host-path/ff129b34-bccd-4a2c-b1c2-75a8a78e1715-hosts-file\") pod \"node-resolver-2kbk8\" (UID: \"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\") " pod="openshift-dns/node-resolver-2kbk8" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.310958 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.321750 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.329186 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kvjx\" (UniqueName: \"kubernetes.io/projected/ff129b34-bccd-4a2c-b1c2-75a8a78e1715-kube-api-access-9kvjx\") pod \"node-resolver-2kbk8\" (UID: \"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\") " pod="openshift-dns/node-resolver-2kbk8" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.330243 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.341646 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.353296 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.365968 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.376564 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.377250 4861 scope.go:117] "RemoveContainer" containerID="56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.420614 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-2kbk8" Jan 29 06:35:31 crc kubenswrapper[4861]: W0129 06:35:31.434923 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff129b34_bccd_4a2c_b1c2_75a8a78e1715.slice/crio-ec6adc87af80adb558fa7f98e2c7a711a84b6be366a519f1f6d5ae0525d17556 WatchSource:0}: Error finding container ec6adc87af80adb558fa7f98e2c7a711a84b6be366a519f1f6d5ae0525d17556: Status 404 returned error can't find the container with id ec6adc87af80adb558fa7f98e2c7a711a84b6be366a519f1f6d5ae0525d17556 Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.532994 4861 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-29 06:30:30 +0000 UTC, rotation deadline is 2026-11-05 14:19:59.728255546 +0000 UTC Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.533083 4861 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6727h44m28.195204984s for next certificate rotation Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.863437 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.867712 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.874273 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.886931 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.913109 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.928883 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc 
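Every patch attempt in this window fails identically: the API server calls the admission webhook pod.network-node-identity.openshift.io, which dials https://127.0.0.1:9743/pod and gets connection refused, so the kubelet cannot persist status for any pod on the node. To size the blast radius, a quick tally of affected pods; this assumes one kubenswrapper record per physical log line (the wrapping seen here is an extraction artifact) and uses a placeholder file name.

    import re
    from collections import Counter

    FAILED = re.compile(r'"Failed to update status for pod" pod="([^"]+)"')

    counts: Counter = Counter()
    with open("kubelet.log", encoding="utf-8", errors="replace") as f:  # placeholder path
        for line in f:
            m = FAILED.search(line)
            # Count only the webhook / connection-refused flavor seen here.
            if m and "pod.network-node-identity.openshift.io" in line \
                 and "connection refused" in line:
                counts[m.group(1)] += 1

    for pod, n in counts.most_common():
        print(f"{n:3d}  {pod}")

The same handful of pods repeats throughout this stretch, which points at one shared dependency being down rather than per-pod problems.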
kubenswrapper[4861]: I0129 06:35:31.939129 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.941303 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.953573 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers 
with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.974479 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.975821 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-wkh9p"] Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.976086 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-6dfzk"] Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.976412 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.976860 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-4942p"] Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.976967 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.977134 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-4942p" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.992140 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.992297 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.992152 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.992559 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.992684 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.992804 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.992865 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.992916 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.993024 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.993130 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.993217 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 29 06:35:31 crc kubenswrapper[4861]: I0129 06:35:31.992694 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.005040 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29
T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.015018 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.025901 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
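The terminal error in each record is a plain TCP dial failure, so it can be confirmed independently of the kubelet. The probe below reuses the address, port, and timeout from the dial error verbatim; it has to run on the node itself, since 127.0.0.1 here is the node's loopback. Sketch only.

    import socket

    # Address, port and timeout taken from the webhook dial error above;
    # nothing else about the endpoint is known from this log alone.
    try:
        with socket.create_connection(("127.0.0.1", 9743), timeout=10):
            print("TCP connect succeeded; something is listening on 9743")
    except OSError as exc:
        # Expect ECONNREFUSED while the network-node-identity webhook
        # container is still in ContainerCreating, as the records show.
        print(f"dial tcp 127.0.0.1:9743 failed: {exc}")

Note the circularity visible in these records: the network-node-identity-vrzqb pod that should serve this endpoint is itself stuck in ContainerCreating, and even its own status patch is rejected by the webhook it is supposed to provide.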
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.035121 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.051282 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.056350 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 08:50:04.417541872 +0000 UTC Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.063228 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287f
aaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.072225 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.089770 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.101115 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.113740 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.115983 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.116042 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.116013 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.116164 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.116329 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.116498 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.127637 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132048 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/da8019d1-2d2c-493d-b80f-1d566eec9475-cni-binary-copy\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132098 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-hostroot\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132128 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5fc70726-e8f8-40d8-b31f-2853e3e856d7-mcd-auth-proxy-config\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132149 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-run-k8s-cni-cncf-io\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132176 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-socket-dir-parent\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132195 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-system-cni-dir\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132287 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5fc70726-e8f8-40d8-b31f-2853e3e856d7-rootfs\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132434 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-cnibin\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132518 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-etc-kubernetes\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132547 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-system-cni-dir\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132595 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-run-netns\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132636 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j282\" (UniqueName: \"kubernetes.io/projected/bccb8691-d6c8-4698-98ec-1f20073e61c4-kube-api-access-5j282\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132677 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-daemon-config\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132730 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-conf-dir\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132762 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-run-multus-certs\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 
06:35:32.132779 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/bccb8691-d6c8-4698-98ec-1f20073e61c4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132812 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-var-lib-kubelet\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132835 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfd4p\" (UniqueName: \"kubernetes.io/projected/5fc70726-e8f8-40d8-b31f-2853e3e856d7-kube-api-access-cfd4p\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132867 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-var-lib-cni-bin\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132894 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-cnibin\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132928 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-os-release\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132949 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-var-lib-cni-multus\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.132972 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-cni-dir\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.133046 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tn628\" (UniqueName: \"kubernetes.io/projected/da8019d1-2d2c-493d-b80f-1d566eec9475-kube-api-access-tn628\") pod \"multus-4942p\" (UID: 
\"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.133100 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/bccb8691-d6c8-4698-98ec-1f20073e61c4-cni-binary-copy\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.133139 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5fc70726-e8f8-40d8-b31f-2853e3e856d7-proxy-tls\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.133182 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-os-release\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.133216 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.143136 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.156052 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29
T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.233985 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-socket-dir-parent\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234032 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-run-k8s-cni-cncf-io\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234053 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-system-cni-dir\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 
crc kubenswrapper[4861]: I0129 06:35:32.234094 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5fc70726-e8f8-40d8-b31f-2853e3e856d7-rootfs\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234123 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-cnibin\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234149 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-etc-kubernetes\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234164 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-system-cni-dir\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234195 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-run-netns\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234210 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j282\" (UniqueName: \"kubernetes.io/projected/bccb8691-d6c8-4698-98ec-1f20073e61c4-kube-api-access-5j282\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234225 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-daemon-config\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234239 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-conf-dir\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234255 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-run-multus-certs\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234269 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/bccb8691-d6c8-4698-98ec-1f20073e61c4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234294 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-var-lib-kubelet\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234309 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfd4p\" (UniqueName: \"kubernetes.io/projected/5fc70726-e8f8-40d8-b31f-2853e3e856d7-kube-api-access-cfd4p\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234324 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-var-lib-cni-bin\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234338 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-cnibin\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234355 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-os-release\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234369 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-var-lib-cni-multus\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234383 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-cni-dir\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234398 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tn628\" (UniqueName: \"kubernetes.io/projected/da8019d1-2d2c-493d-b80f-1d566eec9475-kube-api-access-tn628\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234413 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/bccb8691-d6c8-4698-98ec-1f20073e61c4-cni-binary-copy\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234427 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5fc70726-e8f8-40d8-b31f-2853e3e856d7-proxy-tls\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234443 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-os-release\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234460 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234480 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/da8019d1-2d2c-493d-b80f-1d566eec9475-cni-binary-copy\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234494 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-hostroot\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.234508 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5fc70726-e8f8-40d8-b31f-2853e3e856d7-mcd-auth-proxy-config\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.235164 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5fc70726-e8f8-40d8-b31f-2853e3e856d7-mcd-auth-proxy-config\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.235377 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-socket-dir-parent\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.235408 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" 
(UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-run-k8s-cni-cncf-io\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.235443 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-system-cni-dir\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.235463 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5fc70726-e8f8-40d8-b31f-2853e3e856d7-rootfs\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.235489 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-cnibin\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.235510 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-etc-kubernetes\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.235531 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-system-cni-dir\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.235552 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-run-netns\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.236313 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-daemon-config\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.236355 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-conf-dir\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.236378 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-run-multus-certs\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 
06:35:32.236834 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/bccb8691-d6c8-4698-98ec-1f20073e61c4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.236875 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-var-lib-kubelet\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.237035 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-var-lib-cni-bin\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.237065 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-cnibin\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.237382 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-os-release\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.237460 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-host-var-lib-cni-multus\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.237619 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-os-release\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.238021 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-hostroot\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.238364 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/da8019d1-2d2c-493d-b80f-1d566eec9475-multus-cni-dir\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.238601 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/da8019d1-2d2c-493d-b80f-1d566eec9475-cni-binary-copy\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.238693 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/bccb8691-d6c8-4698-98ec-1f20073e61c4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.239645 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/bccb8691-d6c8-4698-98ec-1f20073e61c4-cni-binary-copy\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.253172 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5fc70726-e8f8-40d8-b31f-2853e3e856d7-proxy-tls\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.269141 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5j282\" (UniqueName: \"kubernetes.io/projected/bccb8691-d6c8-4698-98ec-1f20073e61c4-kube-api-access-5j282\") pod \"multus-additional-cni-plugins-6dfzk\" (UID: \"bccb8691-d6c8-4698-98ec-1f20073e61c4\") " pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.278399 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tn628\" (UniqueName: \"kubernetes.io/projected/da8019d1-2d2c-493d-b80f-1d566eec9475-kube-api-access-tn628\") pod \"multus-4942p\" (UID: \"da8019d1-2d2c-493d-b80f-1d566eec9475\") " pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.282138 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfd4p\" (UniqueName: \"kubernetes.io/projected/5fc70726-e8f8-40d8-b31f-2853e3e856d7-kube-api-access-cfd4p\") pod \"machine-config-daemon-wkh9p\" (UID: \"5fc70726-e8f8-40d8-b31f-2853e3e856d7\") " pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.287036 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-2kbk8" event={"ID":"ff129b34-bccd-4a2c-b1c2-75a8a78e1715","Type":"ContainerStarted","Data":"db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1"} Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.287097 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-2kbk8" event={"ID":"ff129b34-bccd-4a2c-b1c2-75a8a78e1715","Type":"ContainerStarted","Data":"ec6adc87af80adb558fa7f98e2c7a711a84b6be366a519f1f6d5ae0525d17556"} Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.289740 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.292195 4861 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e"} Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.292508 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.294063 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c"} Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.294132 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1"} Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.295540 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a"} Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.302138 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.316826 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.321252 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-4942p" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.328563 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.1
1\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.346422 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":
{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.364890 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: W0129 06:35:32.373135 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbccb8691_d6c8_4698_98ec_1f20073e61c4.slice/crio-797c69ff4f8c6551435bf250aac9e96f1e74396cbbd054c57aa2d54d13a4a757 WatchSource:0}: Error finding container 797c69ff4f8c6551435bf250aac9e96f1e74396cbbd054c57aa2d54d13a4a757: Status 404 returned error can't find the container with id 797c69ff4f8c6551435bf250aac9e96f1e74396cbbd054c57aa2d54d13a4a757 Jan 29 06:35:32 crc kubenswrapper[4861]: W0129 06:35:32.380875 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda8019d1_2d2c_493d_b80f_1d566eec9475.slice/crio-badc052f4ce2ade738360d123b9b721a24f7f32291753711aca16c16bc3a899d WatchSource:0}: Error finding container badc052f4ce2ade738360d123b9b721a24f7f32291753711aca16c16bc3a899d: Status 404 returned error can't find the container with id badc052f4ce2ade738360d123b9b721a24f7f32291753711aca16c16bc3a899d Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.381399 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5xdwl"] Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.382246 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.384601 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.384855 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.385154 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.385279 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.385322 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.397035 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.397457 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.432349 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.458451 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.475330 4861 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.485350 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.495554 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.506060 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.521229 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.533250 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540715 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-slash\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540751 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-netns\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540767 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-openvswitch\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540788 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-config\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540804 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-ovn-kubernetes\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 
06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540821 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-script-lib\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540838 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-node-log\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540854 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpcvl\" (UniqueName: \"kubernetes.io/projected/c6ece014-5432-4877-9449-4253d6124c73-kube-api-access-jpcvl\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540873 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-netd\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540891 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-ovn\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540908 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-etc-openvswitch\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540925 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c6ece014-5432-4877-9449-4253d6124c73-ovn-node-metrics-cert\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540942 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-kubelet\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540957 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-systemd\") pod \"ovnkube-node-5xdwl\" (UID: 
\"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540972 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-var-lib-openvswitch\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.540989 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-log-socket\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.541016 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-bin\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.541038 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-systemd-units\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.541121 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-env-overrides\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.541148 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.545707 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready 
status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.557481 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.574881 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.586340 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.602171 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.613550 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.623385 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.632455 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.640386 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641704 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"log-socket\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-log-socket\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641758 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-bin\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641785 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-systemd-units\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641833 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641862 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-env-overrides\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641875 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-bin\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641892 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-slash\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641899 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-log-socket\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641938 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-slash\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641947 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-netns\") 
pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641967 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-openvswitch\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641989 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-openvswitch\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641991 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.641979 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-systemd-units\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642012 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-netns\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642036 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-config\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642088 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-ovn-kubernetes\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642114 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-script-lib\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642139 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-node-log\") pod \"ovnkube-node-5xdwl\" (UID: 
\"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642170 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpcvl\" (UniqueName: \"kubernetes.io/projected/c6ece014-5432-4877-9449-4253d6124c73-kube-api-access-jpcvl\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642198 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-netd\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642230 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-ovn\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642263 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-etc-openvswitch\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642286 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c6ece014-5432-4877-9449-4253d6124c73-ovn-node-metrics-cert\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642312 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-systemd\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642335 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-kubelet\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642359 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-var-lib-openvswitch\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642463 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-var-lib-openvswitch\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642515 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-ovn-kubernetes\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642678 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-systemd\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642684 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-ovn\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642724 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-node-log\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642792 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-kubelet\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642814 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-netd\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642848 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-env-overrides\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.642870 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-etc-openvswitch\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.643044 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-config\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.643467 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-script-lib\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.651139 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.660680 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.676899 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.682802 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c6ece014-5432-4877-9449-4253d6124c73-ovn-node-metrics-cert\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.682838 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpcvl\" (UniqueName: \"kubernetes.io/projected/c6ece014-5432-4877-9449-4253d6124c73-kube-api-access-jpcvl\") pod \"ovnkube-node-5xdwl\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.687273 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.700992 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.708110 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.743544 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.743703 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:35:36.743679389 +0000 UTC m=+28.415173946 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:35:32 crc kubenswrapper[4861]: W0129 06:35:32.789436 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc6ece014_5432_4877_9449_4253d6124c73.slice/crio-20d47002536ef79877b65908f4cb24594ef452e5552e6bbe08a1c75aa13aebce WatchSource:0}: Error finding container 20d47002536ef79877b65908f4cb24594ef452e5552e6bbe08a1c75aa13aebce: Status 404 returned error can't find the container with id 20d47002536ef79877b65908f4cb24594ef452e5552e6bbe08a1c75aa13aebce Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.844562 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.844615 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.844643 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:32 crc kubenswrapper[4861]: I0129 06:35:32.844669 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " 
pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.844740 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.844768 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.844780 4861 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.844806 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.844828 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.844820 4861 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.844920 4861 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.844841 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:36.844823644 +0000 UTC m=+28.516318201 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.844843 4861 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.845159 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:36.845108221 +0000 UTC m=+28.516602778 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.845188 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:36.845180663 +0000 UTC m=+28.516675220 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:35:32 crc kubenswrapper[4861]: E0129 06:35:32.845211 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:36.845203103 +0000 UTC m=+28.516697660 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.056523 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 09:11:50.532170085 +0000 UTC Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.301751 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4" exitCode=0 Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.301887 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"} Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.301990 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"20d47002536ef79877b65908f4cb24594ef452e5552e6bbe08a1c75aa13aebce"} Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.304609 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4942p" event={"ID":"da8019d1-2d2c-493d-b80f-1d566eec9475","Type":"ContainerStarted","Data":"740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8"} Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.304946 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4942p" 
event={"ID":"da8019d1-2d2c-493d-b80f-1d566eec9475","Type":"ContainerStarted","Data":"badc052f4ce2ade738360d123b9b721a24f7f32291753711aca16c16bc3a899d"} Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.306750 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" event={"ID":"bccb8691-d6c8-4698-98ec-1f20073e61c4","Type":"ContainerStarted","Data":"2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc"} Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.306787 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" event={"ID":"bccb8691-d6c8-4698-98ec-1f20073e61c4","Type":"ContainerStarted","Data":"797c69ff4f8c6551435bf250aac9e96f1e74396cbbd054c57aa2d54d13a4a757"} Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.309250 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22"} Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.309324 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3"} Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.309354 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"0a8a39418262b1608ebb1c04919cd3c57ec07fa5ab8c0ebf7b85d10fe68848ef"} Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.327817 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.347970 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.363796 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.379602 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.396386 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.407496 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.425309 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.438101 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.456497 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.471587 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.499860 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.513353 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.526869 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.540432 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.558088 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.566755 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-m9rgs"] Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.567289 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-m9rgs" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.569257 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.569575 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.569627 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.570155 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.578262 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.592195 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.607466 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.620670 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.637904 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.651965 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5nvp\" (UniqueName: \"kubernetes.io/projected/d15f2c9b-c70f-4574-b682-aeed3426f2c0-kube-api-access-h5nvp\") pod \"node-ca-m9rgs\" (UID: \"d15f2c9b-c70f-4574-b682-aeed3426f2c0\") " pod="openshift-image-registry/node-ca-m9rgs" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.652013 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d15f2c9b-c70f-4574-b682-aeed3426f2c0-serviceca\") pod \"node-ca-m9rgs\" (UID: \"d15f2c9b-c70f-4574-b682-aeed3426f2c0\") " pod="openshift-image-registry/node-ca-m9rgs" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.652058 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d15f2c9b-c70f-4574-b682-aeed3426f2c0-host\") pod \"node-ca-m9rgs\" (UID: \"d15f2c9b-c70f-4574-b682-aeed3426f2c0\") " pod="openshift-image-registry/node-ca-m9rgs" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.664214 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics 
northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"
host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.688030 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.704656 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCou
nt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.721765 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.737349 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.751559 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.753045 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d15f2c9b-c70f-4574-b682-aeed3426f2c0-host\") pod \"node-ca-m9rgs\" (UID: \"d15f2c9b-c70f-4574-b682-aeed3426f2c0\") " pod="openshift-image-registry/node-ca-m9rgs" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.753171 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d15f2c9b-c70f-4574-b682-aeed3426f2c0-host\") pod \"node-ca-m9rgs\" (UID: \"d15f2c9b-c70f-4574-b682-aeed3426f2c0\") " pod="openshift-image-registry/node-ca-m9rgs" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.753172 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5nvp\" (UniqueName: \"kubernetes.io/projected/d15f2c9b-c70f-4574-b682-aeed3426f2c0-kube-api-access-h5nvp\") pod \"node-ca-m9rgs\" (UID: \"d15f2c9b-c70f-4574-b682-aeed3426f2c0\") " pod="openshift-image-registry/node-ca-m9rgs" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.753236 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d15f2c9b-c70f-4574-b682-aeed3426f2c0-serviceca\") pod \"node-ca-m9rgs\" (UID: \"d15f2c9b-c70f-4574-b682-aeed3426f2c0\") " pod="openshift-image-registry/node-ca-m9rgs" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.754439 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d15f2c9b-c70f-4574-b682-aeed3426f2c0-serviceca\") pod \"node-ca-m9rgs\" (UID: \"d15f2c9b-c70f-4574-b682-aeed3426f2c0\") " pod="openshift-image-registry/node-ca-m9rgs" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.786259 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5nvp\" (UniqueName: \"kubernetes.io/projected/d15f2c9b-c70f-4574-b682-aeed3426f2c0-kube-api-access-h5nvp\") pod \"node-ca-m9rgs\" (UID: \"d15f2c9b-c70f-4574-b682-aeed3426f2c0\") " pod="openshift-image-registry/node-ca-m9rgs" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.797427 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.845384 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.860706 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.875024 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.881025 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-m9rgs" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.891036 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: W0129 06:35:33.894797 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd15f2c9b_c70f_4574_b682_aeed3426f2c0.slice/crio-16234c5e028ceb6d7e5d4c1f43c2786f05ad1423ac8885affd85ddacb7e14ae9 WatchSource:0}: Error finding container 16234c5e028ceb6d7e5d4c1f43c2786f05ad1423ac8885affd85ddacb7e14ae9: Status 404 returned error can't find the container with id 16234c5e028ceb6d7e5d4c1f43c2786f05ad1423ac8885affd85ddacb7e14ae9 Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.905071 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\
\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.917909 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\
":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.929039 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.940635 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.952395 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.967503 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:33 crc kubenswrapper[4861]: I0129 06:35:33.988246 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.005946 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.029490 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.057786 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 17:23:42.581362786 +0000 UTC Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.116016 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.116125 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:34 crc kubenswrapper[4861]: E0129 06:35:34.116197 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:34 crc kubenswrapper[4861]: E0129 06:35:34.116322 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.116669 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:34 crc kubenswrapper[4861]: E0129 06:35:34.116755 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.314493 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56"} Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.316310 4861 generic.go:334] "Generic (PLEG): container finished" podID="bccb8691-d6c8-4698-98ec-1f20073e61c4" containerID="2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc" exitCode=0 Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.316395 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" event={"ID":"bccb8691-d6c8-4698-98ec-1f20073e61c4","Type":"ContainerDied","Data":"2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc"} Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.320048 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-m9rgs" event={"ID":"d15f2c9b-c70f-4574-b682-aeed3426f2c0","Type":"ContainerStarted","Data":"56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562"} Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.320124 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-m9rgs" event={"ID":"d15f2c9b-c70f-4574-b682-aeed3426f2c0","Type":"ContainerStarted","Data":"16234c5e028ceb6d7e5d4c1f43c2786f05ad1423ac8885affd85ddacb7e14ae9"} Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.340967 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.354422 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.373890 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.389447 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.405857 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.422318 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.437504 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.449669 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controll
er-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.461047 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.477888 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.502125 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.517735 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.533884 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.550991 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":
true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.563928 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"p
odIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.606871 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe
497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.625412 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.644669 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.670758 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.722339 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.734863 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.748221 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.779136 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z 
is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.816101 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.855126 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.897755 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.938884 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:34 crc kubenswrapper[4861]: I0129 06:35:34.986870 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.058713 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 21:54:52.594726507 +0000 UTC Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.325386 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" event={"ID":"bccb8691-d6c8-4698-98ec-1f20073e61c4","Type":"ContainerStarted","Data":"3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8"} Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.329801 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"} Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.329835 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"} Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.329847 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"} Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.350685 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.371589 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.386795 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.401788 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4c
a4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.424295 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.444714 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.457581 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.463853 4861 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.466498 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.466560 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.466582 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.466727 4861 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.476742 4861 kubelet_node_status.go:115] "Node was previously registered" node="crc"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.476741 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.477171 4861 kubelet_node_status.go:79] "Successfully registered node" node="crc"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.478505 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.478566 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.478588 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.478722 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.478890 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:35Z","lastTransitionTime":"2026-01-29T06:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.512486 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.529161 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: E0129 06:35:35.530430 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.536653 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.536693 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.536702 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.536719 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.536734 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:35Z","lastTransitionTime":"2026-01-29T06:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:35 crc kubenswrapper[4861]: E0129 06:35:35.551237 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.552134 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.555986 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.556046 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.556058 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.556101 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.556116 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:35Z","lastTransitionTime":"2026-01-29T06:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.567796 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: E0129 06:35:35.577303 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient 
memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\
\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\
":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.581811 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.581868 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.581890 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.581918 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.581938 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:35Z","lastTransitionTime":"2026-01-29T06:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.595066 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: E0129 06:35:35.597645 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.614901 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.614937 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.614950 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.614971 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.614984 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:35Z","lastTransitionTime":"2026-01-29T06:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.617661 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: E0129 06:35:35.637366 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:35Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:35 crc kubenswrapper[4861]: E0129 06:35:35.637512 4861 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.639742 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.639785 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.639821 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.639841 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.639855 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:35Z","lastTransitionTime":"2026-01-29T06:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.742750 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.742810 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.742824 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.742852 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.742867 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:35Z","lastTransitionTime":"2026-01-29T06:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.846150 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.846198 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.846211 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.846233 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.846248 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:35Z","lastTransitionTime":"2026-01-29T06:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.949274 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.949773 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.949783 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.949797 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:35 crc kubenswrapper[4861]: I0129 06:35:35.949808 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:35Z","lastTransitionTime":"2026-01-29T06:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.053644 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.053692 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.053703 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.053725 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.053739 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:36Z","lastTransitionTime":"2026-01-29T06:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.059847 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 17:52:54.454516813 +0000 UTC Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.115447 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.115581 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.115662 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.115809 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.115841 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.116047 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.157247 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.157314 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.157333 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.157360 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.157379 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:36Z","lastTransitionTime":"2026-01-29T06:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.260609 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.260676 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.260697 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.260725 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.260742 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:36Z","lastTransitionTime":"2026-01-29T06:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.337872 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.337938 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.337972 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.341447 4861 generic.go:334] "Generic (PLEG): container finished" podID="bccb8691-d6c8-4698-98ec-1f20073e61c4" containerID="3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8" exitCode=0 Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.341493 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" event={"ID":"bccb8691-d6c8-4698-98ec-1f20073e61c4","Type":"ContainerDied","Data":"3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.362811 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.364947 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.365184 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.365324 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.368742 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.368920 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:36Z","lastTransitionTime":"2026-01-29T06:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.382362 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.402622 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.424422 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.441452 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.456524 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.472501 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.472572 4861 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.472591 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.472617 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.472636 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:36Z","lastTransitionTime":"2026-01-29T06:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.473851 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.492763 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.513292 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.530027 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.550059 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.567597 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift
-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.591989 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.592038 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.592053 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.592097 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.592113 4861 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:36Z","lastTransitionTime":"2026-01-29T06:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.615674 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.636359 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:36Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.696505 4861 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.696573 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.696584 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.696836 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.696857 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:36Z","lastTransitionTime":"2026-01-29T06:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.795928 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.796243 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:35:44.796221678 +0000 UTC m=+36.467716235 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.800618 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.800703 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.800725 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.800755 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.800775 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:36Z","lastTransitionTime":"2026-01-29T06:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.897217 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.897305 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.897362 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.897400 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.897731 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.897771 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.897786 4861 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.897789 4861 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.897852 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:44.897833674 +0000 UTC m=+36.569328231 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.897892 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:44.897864015 +0000 UTC m=+36.569358612 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.897727 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.897943 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.897963 4861 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.898038 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:44.898016079 +0000 UTC m=+36.569510676 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.898126 4861 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:35:36 crc kubenswrapper[4861]: E0129 06:35:36.898206 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:35:44.898186183 +0000 UTC m=+36.569680770 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.903515 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.903564 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.903576 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.903595 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:36 crc kubenswrapper[4861]: I0129 06:35:36.903607 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:36Z","lastTransitionTime":"2026-01-29T06:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.007344 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.007402 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.007414 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.007434 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.007449 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:37Z","lastTransitionTime":"2026-01-29T06:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.060599 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 10:00:35.662835612 +0000 UTC Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.110981 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.111057 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.111069 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.111108 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.111119 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:37Z","lastTransitionTime":"2026-01-29T06:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.214775 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.214840 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.214858 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.214884 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.214906 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:37Z","lastTransitionTime":"2026-01-29T06:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.319350 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.319422 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.319440 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.319465 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.319485 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:37Z","lastTransitionTime":"2026-01-29T06:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.349577 4861 generic.go:334] "Generic (PLEG): container finished" podID="bccb8691-d6c8-4698-98ec-1f20073e61c4" containerID="5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361" exitCode=0 Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.349654 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" event={"ID":"bccb8691-d6c8-4698-98ec-1f20073e61c4","Type":"ContainerDied","Data":"5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361"} Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.377407 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.409445 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.422538 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.422621 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.422639 4861 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.422669 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.422690 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:37Z","lastTransitionTime":"2026-01-29T06:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.442872 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z 
is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.460906 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.483312 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.503420 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.522069 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.527482 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.527560 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.527581 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.527613 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.527634 4861 setters.go:603] "Node became not ready" node="crc" 
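The kube-apiserver-check-endpoints entry above is the one place this log shows why a container died: its lastState.terminated.message carries the tail of the previous container's own output, which the kubelet captures as the termination message. Those captured lines use the klog/glog header format <severity><MMDD> <hh:mm:ss.micros> <pid> <file>:<line>] <msg>, and the closing F (fatal) line fits the recorded exitCode 255, the status klog's Fatal path exits with. A small parsing sketch (the regexp and sample line are illustrative, not any kubelet API):

package main

import (
	"fmt"
	"regexp"
)

// klogHeader matches lines like "F0129 06:35:29.107365  1 cmd.go:182] ...":
// severity I/W/E/F, MMDD date, wall-clock time, PID, then source file:line.
var klogHeader = regexp.MustCompile(
	`^([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+) ([^ ]+:\d+)\] (.*)$`)

func main() {
	line := `F0129 06:35:29.107365       1 cmd.go:182] pods "kube-apiserver-crc" not found`
	m := klogHeader.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("not a klog-formatted line")
		return
	}
	fmt.Printf("severity=%s date=%s time=%s pid=%s source=%s\nmsg=%q\n",
		m[1], m[2], m[3], m[4], m[5], m[6])
}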
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:37Z","lastTransitionTime":"2026-01-29T06:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.540921 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.557982 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.575503 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4c
a4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.592670 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
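The network-check-source and network-check-target statuses in this run both show a synthesized last state: exitCode 137, reason ContainerStatusUnknown, null timestamps, and a "could not be located" message, recorded when the runtime has lost track of a container that was running before the pod was deleted. A decoding sketch for one such element (the JSON literal is abridged from the statuses here; reading 137 as 128+SIGKILL is the usual convention, not something the log itself states):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	raw := []byte(`{
		"name": "check-endpoints",
		"ready": false,
		"restartCount": 3,
		"lastState": {"terminated": {
			"exitCode": 137,
			"reason": "ContainerStatusUnknown",
			"message": "The container could not be located when the pod was deleted. The container used to be Running"
		}},
		"state": {"waiting": {"reason": "ContainerCreating"}}
	}`)

	var cs corev1.ContainerStatus
	if err := json.Unmarshal(raw, &cs); err != nil {
		log.Fatal(err)
	}
	if t := cs.LastTerminationState.Terminated; t != nil {
		fmt.Printf("%s: restarts=%d exit=%d reason=%s\n",
			cs.Name, cs.RestartCount, t.ExitCode, t.Reason)
	}
}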
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.605621 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.619865 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.630654 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.631323 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.631350 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.631386 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.631408 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:37Z","lastTransitionTime":"2026-01-29T06:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.637843 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:37Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.735372 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.735467 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.735507 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.735545 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.735573 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:37Z","lastTransitionTime":"2026-01-29T06:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.838219 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.838293 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.838313 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.838345 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.838365 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:37Z","lastTransitionTime":"2026-01-29T06:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.942144 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.942216 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.942235 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.942264 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:37 crc kubenswrapper[4861]: I0129 06:35:37.942287 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:37Z","lastTransitionTime":"2026-01-29T06:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.045860 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.045929 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.045949 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.045977 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.045997 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:38Z","lastTransitionTime":"2026-01-29T06:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.061347 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 16:25:46.62902963 +0000 UTC
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.115646 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.115714 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.115749 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:35:38 crc kubenswrapper[4861]: E0129 06:35:38.115886 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:35:38 crc kubenswrapper[4861]: E0129 06:35:38.116117 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:35:38 crc kubenswrapper[4861]: E0129 06:35:38.116282 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.150916 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.150991 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.151013 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.151042 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.151064 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:38Z","lastTransitionTime":"2026-01-29T06:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.255311 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.255489 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.255515 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.255550 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.255571 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:38Z","lastTransitionTime":"2026-01-29T06:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.358281 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.358391 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.358414 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.358438 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.358455 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:38Z","lastTransitionTime":"2026-01-29T06:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.360752 4861 generic.go:334] "Generic (PLEG): container finished" podID="bccb8691-d6c8-4698-98ec-1f20073e61c4" containerID="074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a" exitCode=0
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.360818 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" event={"ID":"bccb8691-d6c8-4698-98ec-1f20073e61c4","Type":"ContainerDied","Data":"074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a"}
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.386159 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.411410 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.430421 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.446620 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.461322 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.461438 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.461460 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.461528 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.461547 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:38Z","lastTransitionTime":"2026-01-29T06:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.464151 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.480723 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.495877 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.513353 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.527968 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.548718 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.566849 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.566919 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.566935 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.566962 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.566979 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:38Z","lastTransitionTime":"2026-01-29T06:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.571766 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.596305 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.622371 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.639885 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:38Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.670325 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.670409 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.670431 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.670464 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.670485 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:38Z","lastTransitionTime":"2026-01-29T06:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.776344 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.776407 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.776419 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.776437 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.776446 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:38Z","lastTransitionTime":"2026-01-29T06:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.778856 4861 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.879199 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.879245 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.879258 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.879279 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.879294 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:38Z","lastTransitionTime":"2026-01-29T06:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.982752 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.982822 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.982843 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.982872 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:38 crc kubenswrapper[4861]: I0129 06:35:38.982894 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:38Z","lastTransitionTime":"2026-01-29T06:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.062430 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 01:13:53.357219094 +0000 UTC Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.086409 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.086484 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.086504 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.086539 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.086560 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:39Z","lastTransitionTime":"2026-01-29T06:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.138922 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.156111 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.176680 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.190153 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.190205 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.190217 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.190237 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.190250 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:39Z","lastTransitionTime":"2026-01-29T06:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.198223 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.219908 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.238514 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.262836 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z 
is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.282691 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.293449 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.293501 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.293563 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.293583 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.293599 4861 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:39Z","lastTransitionTime":"2026-01-29T06:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.304682 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.340193 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.361227 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.374379 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" event={"ID":"bccb8691-d6c8-4698-98ec-1f20073e61c4","Type":"ContainerStarted","Data":"92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2"} Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.378312 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.385857 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"} Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.396319 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.397433 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.397600 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.397781 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.397961 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.398110 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:39Z","lastTransitionTime":"2026-01-29T06:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.418882 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.437138 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.459354 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.484753 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.498463 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.501668 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.501730 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.501752 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.501778 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.501805 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:39Z","lastTransitionTime":"2026-01-29T06:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.526823 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.550063 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.573532 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.596114 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.604783 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.604855 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.604876 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.604906 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.604927 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:39Z","lastTransitionTime":"2026-01-29T06:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.616231 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.637909 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026
-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.659891 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.679844 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.696446 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.708146 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.708195 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.708207 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.708224 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.708236 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:39Z","lastTransitionTime":"2026-01-29T06:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.713883 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.811435 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.811858 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.812044 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.812288 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.812438 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:39Z","lastTransitionTime":"2026-01-29T06:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.915730 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.915794 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.915817 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.915852 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:39 crc kubenswrapper[4861]: I0129 06:35:39.915874 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:39Z","lastTransitionTime":"2026-01-29T06:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.019709 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.019772 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.019790 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.019818 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.019837 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:40Z","lastTransitionTime":"2026-01-29T06:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.062816 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 18:18:15.319599244 +0000 UTC Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.116412 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.116575 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.116449 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:40 crc kubenswrapper[4861]: E0129 06:35:40.116734 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:40 crc kubenswrapper[4861]: E0129 06:35:40.117116 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:40 crc kubenswrapper[4861]: E0129 06:35:40.117374 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.123517 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.123610 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.123634 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.123667 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.123703 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:40Z","lastTransitionTime":"2026-01-29T06:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.227098 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.227176 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.227195 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.227222 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.227241 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:40Z","lastTransitionTime":"2026-01-29T06:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.330888 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.330948 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.330967 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.330991 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.331009 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:40Z","lastTransitionTime":"2026-01-29T06:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.395021 4861 generic.go:334] "Generic (PLEG): container finished" podID="bccb8691-d6c8-4698-98ec-1f20073e61c4" containerID="92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2" exitCode=0 Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.395132 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" event={"ID":"bccb8691-d6c8-4698-98ec-1f20073e61c4","Type":"ContainerDied","Data":"92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2"} Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.418389 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.435002 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.435068 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.435159 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.435187 4861 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeNotReady" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.435207 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:40Z","lastTransitionTime":"2026-01-29T06:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.443204 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.475787 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/
var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"re
cursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.496414 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.518136 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.539021 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.539134 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.539151 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.539195 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.539212 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:40Z","lastTransitionTime":"2026-01-29T06:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.550689 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j2
82\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.575477 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.594130 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.610989 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.630904 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4c
a4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.642852 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.642904 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.642919 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.642939 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.642952 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:40Z","lastTransitionTime":"2026-01-29T06:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.655494 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.676116 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.691239 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.707035 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}
,{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:40Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.746912 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.746955 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.746968 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.746987 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.747002 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:40Z","lastTransitionTime":"2026-01-29T06:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.849426 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.849510 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.849530 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.849563 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.849583 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:40Z","lastTransitionTime":"2026-01-29T06:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.952152 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.952197 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.952209 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.952228 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:40 crc kubenswrapper[4861]: I0129 06:35:40.952241 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:40Z","lastTransitionTime":"2026-01-29T06:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.055389 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.055808 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.055823 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.055841 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.055855 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:41Z","lastTransitionTime":"2026-01-29T06:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.063138 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 01:16:36.008621606 +0000 UTC
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.158292 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.158345 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.158361 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.158382 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.158398 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:41Z","lastTransitionTime":"2026-01-29T06:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.262162 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.262217 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.262231 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.262306 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.262320 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:41Z","lastTransitionTime":"2026-01-29T06:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.366143 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.366206 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.366225 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.366251 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.366270 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:41Z","lastTransitionTime":"2026-01-29T06:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.405629 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02"}
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.405934 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.405993 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl"
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.412020 4861 generic.go:334] "Generic (PLEG): container finished" podID="bccb8691-d6c8-4698-98ec-1f20073e61c4" containerID="4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7" exitCode=0
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.412126 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" event={"ID":"bccb8691-d6c8-4698-98ec-1f20073e61c4","Type":"ContainerDied","Data":"4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7"}
Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.423621 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.440341 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.456052 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.457629 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.460549 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.469598 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.469649 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.469669 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.469697 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.469719 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:41Z","lastTransitionTime":"2026-01-29T06:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.475797 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.491516 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.512534 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.528463 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.541400 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.562704 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a25
1caac987cb989a708c908f02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.573584 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.577422 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.577474 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.577484 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.577516 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.577527 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:41Z","lastTransitionTime":"2026-01-29T06:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.589057 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.610757 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.625296 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.640995 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://929
77de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.659679 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.676145 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.681904 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.681972 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.681983 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.682021 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.682037 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:41Z","lastTransitionTime":"2026-01-29T06:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.693355 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.712516 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.727136 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha
256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.751677 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.767331 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.796810 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.796863 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.796880 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.796907 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.796926 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:41Z","lastTransitionTime":"2026-01-29T06:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.808713 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.833872 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.871689 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.889578 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.899399 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.899438 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.899452 4861 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.899475 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.899490 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:41Z","lastTransitionTime":"2026-01-29T06:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.913421 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a25
1caac987cb989a708c908f02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.927560 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:41 crc kubenswrapper[4861]: I0129 06:35:41.942778 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:41Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.002775 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.003129 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.003210 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.003285 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.003351 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:42Z","lastTransitionTime":"2026-01-29T06:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.063976 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 06:15:42.975028572 +0000 UTC Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.106751 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.106800 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.106811 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.106831 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.106848 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:42Z","lastTransitionTime":"2026-01-29T06:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.115931 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:42 crc kubenswrapper[4861]: E0129 06:35:42.116086 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.116406 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:42 crc kubenswrapper[4861]: E0129 06:35:42.116458 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.116502 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:42 crc kubenswrapper[4861]: E0129 06:35:42.116548 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.209167 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.209445 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.209456 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.209476 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.209491 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:42Z","lastTransitionTime":"2026-01-29T06:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.313515 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.313586 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.313609 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.313651 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.313677 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:42Z","lastTransitionTime":"2026-01-29T06:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.418825 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.418882 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.418898 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.418922 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.418939 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:42Z","lastTransitionTime":"2026-01-29T06:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.424717 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" event={"ID":"bccb8691-d6c8-4698-98ec-1f20073e61c4","Type":"ContainerStarted","Data":"891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f"} Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.424800 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.447996 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.473584 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.496816 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.517336 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.522611 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.522677 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.522697 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.522727 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.522749 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:42Z","lastTransitionTime":"2026-01-29T06:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.536560 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.556664 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026
-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.577591 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.595187 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.612148 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.627053 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.627172 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.627191 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.627220 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.627241 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:42Z","lastTransitionTime":"2026-01-29T06:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.634627 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.654543 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.677812 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.713540 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.743529 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:42Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.746219 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.746288 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.746309 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.746343 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.746363 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:42Z","lastTransitionTime":"2026-01-29T06:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.849747 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.849823 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.849840 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.849868 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.849885 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:42Z","lastTransitionTime":"2026-01-29T06:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.953235 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.953311 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.953331 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.953357 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:42 crc kubenswrapper[4861]: I0129 06:35:42.953375 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:42Z","lastTransitionTime":"2026-01-29T06:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.057387 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.057484 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.057506 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.057536 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.057558 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:43Z","lastTransitionTime":"2026-01-29T06:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.065651 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 04:58:24.679194373 +0000 UTC Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.161365 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.161459 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.161483 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.161516 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.161545 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:43Z","lastTransitionTime":"2026-01-29T06:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.264899 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.264956 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.264975 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.265004 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.265025 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:43Z","lastTransitionTime":"2026-01-29T06:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.369660 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.369799 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.369818 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.369848 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.369870 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:43Z","lastTransitionTime":"2026-01-29T06:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.430148 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.473726 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.473810 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.473832 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.473861 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.473884 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:43Z","lastTransitionTime":"2026-01-29T06:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.577959 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.578034 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.578103 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.578134 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.578155 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:43Z","lastTransitionTime":"2026-01-29T06:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.682183 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.682276 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.682302 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.682333 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.682355 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:43Z","lastTransitionTime":"2026-01-29T06:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.785739 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.785802 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.785819 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.785866 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.785887 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:43Z","lastTransitionTime":"2026-01-29T06:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.888281 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.888371 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.888389 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.888424 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.888459 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:43Z","lastTransitionTime":"2026-01-29T06:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.991104 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.991157 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.991172 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.991189 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:43 crc kubenswrapper[4861]: I0129 06:35:43.991201 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:43Z","lastTransitionTime":"2026-01-29T06:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.066755 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 20:08:07.146154058 +0000 UTC Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.094224 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.094285 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.094298 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.094319 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.094333 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:44Z","lastTransitionTime":"2026-01-29T06:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.115670 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.115757 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.115709 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.115820 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.115890 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.115961 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.196603 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.196678 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.196693 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.196712 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.196748 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:44Z","lastTransitionTime":"2026-01-29T06:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.300298 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.300375 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.300397 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.300431 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.300453 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:44Z","lastTransitionTime":"2026-01-29T06:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.404044 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.404144 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.404159 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.404175 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.404189 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:44Z","lastTransitionTime":"2026-01-29T06:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.508242 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.508316 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.508334 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.508365 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.508383 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:44Z","lastTransitionTime":"2026-01-29T06:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.611369 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.611447 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.611471 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.611499 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.611524 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:44Z","lastTransitionTime":"2026-01-29T06:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.716681 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.716737 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.716752 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.716771 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.716785 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:44Z","lastTransitionTime":"2026-01-29T06:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.796610 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.796898 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:36:00.796849869 +0000 UTC m=+52.468344486 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.820650 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.820702 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.820718 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.820743 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.820761 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:44Z","lastTransitionTime":"2026-01-29T06:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.898196 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.898249 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.898281 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.898309 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898464 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898513 4861 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898539 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898580 4861 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898615 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:36:00.898589479 +0000 UTC m=+52.570084076 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898669 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 06:36:00.89863924 +0000 UTC m=+52.570133827 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898492 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898708 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898723 4861 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898771 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 06:36:00.898756873 +0000 UTC m=+52.570251460 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898480 4861 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:35:44 crc kubenswrapper[4861]: E0129 06:35:44.898847 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:36:00.898831375 +0000 UTC m=+52.570325942 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.924214 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.924276 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.924295 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.924323 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:44 crc kubenswrapper[4861]: I0129 06:35:44.924357 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:44Z","lastTransitionTime":"2026-01-29T06:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.027566 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.027658 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.027678 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.027702 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.027719 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:45Z","lastTransitionTime":"2026-01-29T06:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.066975 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 07:44:59.191966707 +0000 UTC Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.132013 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.132159 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.132184 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.132280 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.132301 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:45Z","lastTransitionTime":"2026-01-29T06:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.179842 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"] Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.182524 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.187201 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.187225 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.204678 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.227780 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.236236 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.236293 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.236310 4861 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.236339 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.236358 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:45Z","lastTransitionTime":"2026-01-29T06:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.262135 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.278797 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.297978 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.302065 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.302201 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.302242 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.302478 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8c77\" (UniqueName: \"kubernetes.io/projected/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-kube-api-access-j8c77\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.318644 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.320979 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.339694 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.339753 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.339772 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.339798 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.339819 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:45Z","lastTransitionTime":"2026-01-29T06:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.347480 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.369754 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.388914 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.403525 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.403770 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.404069 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.404397 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8c77\" (UniqueName: \"kubernetes.io/projected/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-kube-api-access-j8c77\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.405137 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.405222 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.412144 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.415584 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.433882 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8c77\" (UniqueName: \"kubernetes.io/projected/8d72d8dc-6f15-4586-acde-6e8ca7b60c12-kube-api-access-j8c77\") pod \"ovnkube-control-plane-749d76644c-2pbqf\" (UID: \"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.434702 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.442376 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.442430 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.442448 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.442476 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.442497 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:45Z","lastTransitionTime":"2026-01-29T06:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.442767 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/0.log"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.448141 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02" exitCode=1
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.448206 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02"}
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.449453 4861 scope.go:117] "RemoveContainer" containerID="a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.458497 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.478334 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.503528 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.504268 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.530978 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io
/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: W0129 06:35:45.534044 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8d72d8dc_6f15_4586_acde_6e8ca7b60c12.slice/crio-3b8b4e0735654a1844e52e42c815e742fd3043d837934a354196196eaf0ffa8f WatchSource:0}: Error finding container 3b8b4e0735654a1844e52e42c815e742fd3043d837934a354196196eaf0ffa8f: Status 404 returned error can't find the container with id 3b8b4e0735654a1844e52e42c815e742fd3043d837934a354196196eaf0ffa8f Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.545726 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.545790 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.545812 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.545842 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 
06:35:45.545866 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:45Z","lastTransitionTime":"2026-01-29T06:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.549670 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce
0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.557455 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.567475 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.584777 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.602502 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.623046 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.642407 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.649122 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.649225 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.649252 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.649288 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.649314 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:45Z","lastTransitionTime":"2026-01-29T06:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.657534 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.678371 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.705718 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"ory.go:656] Stopping watch factory\\\\nI0129 06:35:44.819250 6168 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 06:35:44.819408 6168 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 06:35:44.819532 6168 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819535 6168 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819612 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819740 6168 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.820018 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from 
sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.
io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.720675 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.738276 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.754164 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.754217 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.754240 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.754268 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.754287 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:45Z","lastTransitionTime":"2026-01-29T06:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.757689 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.773315 4861 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.791481 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.807348 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.827890 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8
c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.840481 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.853937 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.857433 4861 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.857472 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.857487 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.857518 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.857537 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:45Z","lastTransitionTime":"2026-01-29T06:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.871647 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\
\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.885521 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.901522 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.912934 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.927113 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.942507 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.950674 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-rh69l"] Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.951712 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:35:45 crc kubenswrapper[4861]: E0129 06:35:45.951832 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.956448 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.956496 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.956512 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.956531 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.956547 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:45Z","lastTransitionTime":"2026-01-29T06:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.960542 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is 
after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.975915 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: E0129 06:35:45.976642 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:45Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.980802 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.980839 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.980853 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.980875 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:45 crc kubenswrapper[4861]: I0129 06:35:45.980888 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:45Z","lastTransitionTime":"2026-01-29T06:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.003803 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 
2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.008775 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.008810 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.008822 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.008840 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.008858 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.013165 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a25
1caac987cb989a708c908f02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"ory.go:656] Stopping watch factory\\\\nI0129 06:35:44.819250 6168 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 06:35:44.819408 6168 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 06:35:44.819532 6168 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819535 6168 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819612 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819740 6168 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.820018 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from 
sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.
io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.025322 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 
2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.030339 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.030383 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.030403 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.030427 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.030441 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.031758 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:
33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.054295 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 
2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.055173 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.058946 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.058998 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.059011 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.059027 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.059040 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.067893 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 01:17:28.468676804 +0000 UTC Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.072092 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/
os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":
false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.082727 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 
2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.082898 4861 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.085261 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.085309 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.085328 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.085353 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.085373 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.092507 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.112356 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggcwq\" (UniqueName: \"kubernetes.io/projected/fb22f8f6-1210-4f39-8712-d33efc26239c-kube-api-access-ggcwq\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.112461 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.116104 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.116155 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.116174 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.116782 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.116982 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.117292 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.189758 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.189815 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.189835 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.189858 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.189870 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.213748 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggcwq\" (UniqueName: \"kubernetes.io/projected/fb22f8f6-1210-4f39-8712-d33efc26239c-kube-api-access-ggcwq\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.213822 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.213995 4861 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.214102 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs podName:fb22f8f6-1210-4f39-8712-d33efc26239c nodeName:}" failed. No retries permitted until 2026-01-29 06:35:46.714052392 +0000 UTC m=+38.385546949 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs") pod "network-metrics-daemon-rh69l" (UID: "fb22f8f6-1210-4f39-8712-d33efc26239c") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.232283 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a
\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.233300 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggcwq\" (UniqueName: \"kubernetes.io/projected/fb22f8f6-1210-4f39-8712-d33efc26239c-kube-api-access-ggcwq\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.293227 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.293278 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.293291 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.293311 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.293324 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.295937 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.320487 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.339839 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.363625 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.380846 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.396499 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.396548 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.396561 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.396581 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.396595 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.397943 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.427521 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.447829 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.454255 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/0.log" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.464552 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.465184 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.466897 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" event={"ID":"8d72d8dc-6f15-4586-acde-6e8ca7b60c12","Type":"ContainerStarted","Data":"fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.466948 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" event={"ID":"8d72d8dc-6f15-4586-acde-6e8ca7b60c12","Type":"ContainerStarted","Data":"499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.466960 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" event={"ID":"8d72d8dc-6f15-4586-acde-6e8ca7b60c12","Type":"ContainerStarted","Data":"3b8b4e0735654a1844e52e42c815e742fd3043d837934a354196196eaf0ffa8f"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.470813 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.492259 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.499360 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.499424 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.499440 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.499466 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.499482 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.510492 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.533193 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: 
I0129 06:35:46.559485 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2
\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77
3257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"ory.go:656] Stopping watch factory\\\\nI0129 06:35:44.819250 6168 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 06:35:44.819408 6168 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 06:35:44.819532 6168 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819535 6168 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819612 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819740 6168 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.820018 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from 
sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.
io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.573331 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.600040 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.601898 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.601942 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.601955 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.601972 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.601983 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.630606 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.646163 4861 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\
\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.662185 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.673582 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.688484 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.704590 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.704642 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 
06:35:46.704653 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.704670 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.704681 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.705481 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z"
Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.719103 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.719366 4861 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 29 06:35:46 crc kubenswrapper[4861]: E0129 06:35:46.719486 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs podName:fb22f8f6-1210-4f39-8712-d33efc26239c nodeName:}" failed. No retries permitted until 2026-01-29 06:35:47.719459681 +0000 UTC m=+39.390954248 (durationBeforeRetry 1s).
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs") pod "network-metrics-daemon-rh69l" (UID: "fb22f8f6-1210-4f39-8712-d33efc26239c") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.723901 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.742019 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.755481 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.772507 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.786585 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.799549 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.813737 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.814027 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.814064 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.814141 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.814162 4861 setters.go:603] "Node became 
not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.817205 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.836325 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube
-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"ory.go:656] Stopping watch factory\\\\nI0129 06:35:44.819250 6168 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 06:35:44.819408 6168 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 06:35:44.819532 6168 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819535 6168 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819612 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819740 6168 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.820018 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from 
sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\
\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.846801 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.918729 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.918777 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.918794 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.918816 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:46 crc kubenswrapper[4861]: I0129 06:35:46.918836 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:46Z","lastTransitionTime":"2026-01-29T06:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.022894 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.022962 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.022974 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.022998 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.023013 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:47Z","lastTransitionTime":"2026-01-29T06:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.068381 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 04:57:54.15865128 +0000 UTC
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.125635 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.125714 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.125729 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.125749 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.125765 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:47Z","lastTransitionTime":"2026-01-29T06:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.229115 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.229170 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.229187 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.229216 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.229240 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:47Z","lastTransitionTime":"2026-01-29T06:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.332661 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.332731 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.332744 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.332762 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.332774 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:47Z","lastTransitionTime":"2026-01-29T06:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.436306 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.436351 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.436361 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.436378 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.436387 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:47Z","lastTransitionTime":"2026-01-29T06:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.473106 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/1.log"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.473935 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/0.log"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.477701 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7" exitCode=1
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.477776 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7"}
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.477870 4861 scope.go:117] "RemoveContainer" containerID="a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.479470 4861 scope.go:117] "RemoveContainer" containerID="69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7"
Jan 29 06:35:47 crc kubenswrapper[4861]: E0129 06:35:47.480693 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.504467 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.519775 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.537479 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.540275 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.540334 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.540353 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.540378 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.540396 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:47Z","lastTransitionTime":"2026-01-29T06:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.558825 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.578271 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.594587 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.614229 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.637250 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a51eed7461d24467c8a045696e6849fe88c97a251caac987cb989a708c908f02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"ory.go:656] Stopping watch factory\\\\nI0129 06:35:44.819250 6168 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 06:35:44.819408 6168 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 06:35:44.819532 6168 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819535 6168 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819612 6168 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.819740 6168 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 06:35:44.820018 6168 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"\\\\nI0129 06:35:46.904426 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904435 6316 
obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904439 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nF0129 06:35:46.904440 6316 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z]\\\\nI0129 06:35:46.904459 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-ope\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.644286 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.644328 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.644344 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.644368 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.644388 4861 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:47Z","lastTransitionTime":"2026-01-29T06:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.655968 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.671178 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 
06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.689444 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.713495 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.731123 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " 
pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:35:47 crc kubenswrapper[4861]: E0129 06:35:47.731306 4861 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:35:47 crc kubenswrapper[4861]: E0129 06:35:47.731375 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs podName:fb22f8f6-1210-4f39-8712-d33efc26239c nodeName:}" failed. No retries permitted until 2026-01-29 06:35:49.731353855 +0000 UTC m=+41.402848452 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs") pod "network-metrics-daemon-rh69l" (UID: "fb22f8f6-1210-4f39-8712-d33efc26239c") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.736192 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volume
Mounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] 
MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.747236 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.747313 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.747332 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.747361 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" 
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.747382 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:47Z","lastTransitionTime":"2026-01-29T06:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.761235 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.778627 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.796804 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:47Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.850528 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.850640 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 
06:35:47.850670 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.850706 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.850733 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:47Z","lastTransitionTime":"2026-01-29T06:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.953988 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.954058 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.954104 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.954130 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:47 crc kubenswrapper[4861]: I0129 06:35:47.954151 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:47Z","lastTransitionTime":"2026-01-29T06:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.057476 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.057543 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.057562 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.057589 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.057607 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:48Z","lastTransitionTime":"2026-01-29T06:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.068633 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 02:49:47.962630793 +0000 UTC
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.116150 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.116280 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.116279 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.116303 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:35:48 crc kubenswrapper[4861]: E0129 06:35:48.116439 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:35:48 crc kubenswrapper[4861]: E0129 06:35:48.116583 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:35:48 crc kubenswrapper[4861]: E0129 06:35:48.116760 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:35:48 crc kubenswrapper[4861]: E0129 06:35:48.116880 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.161111 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.161174 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.161192 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.161217 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.161237 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:48Z","lastTransitionTime":"2026-01-29T06:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.264754 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.264827 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.264848 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.264880 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.264903 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:48Z","lastTransitionTime":"2026-01-29T06:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.369141 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.369212 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.369231 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.369265 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.369289 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:48Z","lastTransitionTime":"2026-01-29T06:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.471639 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.471716 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.471736 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.471767 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.471789 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:48Z","lastTransitionTime":"2026-01-29T06:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.484469 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/1.log" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.490271 4861 scope.go:117] "RemoveContainer" containerID="69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7" Jan 29 06:35:48 crc kubenswrapper[4861]: E0129 06:35:48.490575 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.512203 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all 
endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.529226 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.549997 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.567275 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.576098 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.576165 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 
06:35:48.576189 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.576219 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.576245 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:48Z","lastTransitionTime":"2026-01-29T06:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.587417 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad
d8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.607658 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.623405 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.638855 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.656189 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.670019 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.684376 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.684414 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.684422 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.684436 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.684445 4861 setters.go:603] 
"Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:48Z","lastTransitionTime":"2026-01-29T06:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.708926 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.750729 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"\\\\nI0129 06:35:46.904426 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904435 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904439 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nF0129 06:35:46.904440 6316 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z]\\\\nI0129 06:35:46.904459 6316 obj_retry.go:303] Retry object setup: *v1.Pod 
openshift-machine-config-ope\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.762646 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.773103 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 
06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.785541 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.786710 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.786747 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.786757 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.786774 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.786784 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:48Z","lastTransitionTime":"2026-01-29T06:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.801847 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01
-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:48Z is after 2025-08-24T17:21:41Z" Jan 29 
06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.890525 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.890812 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.890909 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.890995 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.891112 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:48Z","lastTransitionTime":"2026-01-29T06:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.994219 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.994617 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.994712 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.994802 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:48 crc kubenswrapper[4861]: I0129 06:35:48.994888 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:48Z","lastTransitionTime":"2026-01-29T06:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.068940 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 19:14:09.792185135 +0000 UTC
Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.098966 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.099030 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.099048 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.099099 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.099118 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:49Z","lastTransitionTime":"2026-01-29T06:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.136149 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled
\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.156231 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-ov
errides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.178194 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.201240 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.203229 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.203366 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.203382 4861 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.203405 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.203419 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:49Z","lastTransitionTime":"2026-01-29T06:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.241446 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab90
17c592f14435ca0c68032ca7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"\\\\nI0129 06:35:46.904426 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904435 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904439 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nF0129 06:35:46.904440 6316 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z]\\\\nI0129 06:35:46.904459 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-ope\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.264443 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.293545 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.307981 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.308045 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.308066 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.308134 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.308155 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:49Z","lastTransitionTime":"2026-01-29T06:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.311687 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.337404 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.363640 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.382428 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.404687 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.411325 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.411370 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.411385 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.411408 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.411422 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:49Z","lastTransitionTime":"2026-01-29T06:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.426699 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0
\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.445943 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.466371 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.484404 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.514836 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.514881 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.514916 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.514936 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.514949 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:49Z","lastTransitionTime":"2026-01-29T06:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.617593 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.617644 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.617658 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.617674 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.618051 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:49Z","lastTransitionTime":"2026-01-29T06:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.721479 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.721538 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.721562 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.721591 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.721610 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:49Z","lastTransitionTime":"2026-01-29T06:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.757287 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:35:49 crc kubenswrapper[4861]: E0129 06:35:49.757594 4861 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:35:49 crc kubenswrapper[4861]: E0129 06:35:49.758275 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs podName:fb22f8f6-1210-4f39-8712-d33efc26239c nodeName:}" failed. No retries permitted until 2026-01-29 06:35:53.758224889 +0000 UTC m=+45.429719476 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs") pod "network-metrics-daemon-rh69l" (UID: "fb22f8f6-1210-4f39-8712-d33efc26239c") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.825473 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.825554 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.825574 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.825605 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.825624 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:49Z","lastTransitionTime":"2026-01-29T06:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.929761 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.929839 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.929860 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.929891 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:49 crc kubenswrapper[4861]: I0129 06:35:49.929916 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:49Z","lastTransitionTime":"2026-01-29T06:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.034062 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.034174 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.034198 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.034229 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.034249 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:50Z","lastTransitionTime":"2026-01-29T06:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.070639 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 15:53:33.916133588 +0000 UTC Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.116391 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.116475 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:50 crc kubenswrapper[4861]: E0129 06:35:50.116916 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:50 crc kubenswrapper[4861]: E0129 06:35:50.117099 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.116490 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.116549 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:50 crc kubenswrapper[4861]: E0129 06:35:50.117327 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:35:50 crc kubenswrapper[4861]: E0129 06:35:50.117576 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.138430 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.138501 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.138519 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.138546 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.138594 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:50Z","lastTransitionTime":"2026-01-29T06:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.242469 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.242549 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.242569 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.242596 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.242616 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:50Z","lastTransitionTime":"2026-01-29T06:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.345600 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.345672 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.345692 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.345721 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.345744 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:50Z","lastTransitionTime":"2026-01-29T06:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.449521 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.449590 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.449608 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.449637 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.449658 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:50Z","lastTransitionTime":"2026-01-29T06:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.553365 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.553470 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.553497 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.553534 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.553555 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:50Z","lastTransitionTime":"2026-01-29T06:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.657148 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.657235 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.657256 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.657290 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.657314 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:50Z","lastTransitionTime":"2026-01-29T06:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.760662 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.760755 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.760780 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.760806 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.760828 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:50Z","lastTransitionTime":"2026-01-29T06:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.864892 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.864982 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.865025 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.865060 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.865125 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:50Z","lastTransitionTime":"2026-01-29T06:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.969222 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.969298 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.969324 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.969355 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:50 crc kubenswrapper[4861]: I0129 06:35:50.969447 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:50Z","lastTransitionTime":"2026-01-29T06:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.070877 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 09:08:10.137847223 +0000 UTC Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.072789 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.072999 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.073168 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.073338 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.073488 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:51Z","lastTransitionTime":"2026-01-29T06:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.177136 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.177237 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.177266 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.177304 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.177327 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:51Z","lastTransitionTime":"2026-01-29T06:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.280032 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.280605 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.280753 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.280903 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.281043 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:51Z","lastTransitionTime":"2026-01-29T06:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.384715 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.384845 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.384868 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.384895 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.384912 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:51Z","lastTransitionTime":"2026-01-29T06:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.488608 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.488670 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.488688 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.488718 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.488737 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:51Z","lastTransitionTime":"2026-01-29T06:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.592717 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.593346 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.593761 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.594153 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.594513 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:51Z","lastTransitionTime":"2026-01-29T06:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.699031 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.699112 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.699130 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.699157 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.699182 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:51Z","lastTransitionTime":"2026-01-29T06:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.802398 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.802474 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.802498 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.802569 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.802590 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:51Z","lastTransitionTime":"2026-01-29T06:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.906129 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.906191 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.906211 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.906238 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:51 crc kubenswrapper[4861]: I0129 06:35:51.906256 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:51Z","lastTransitionTime":"2026-01-29T06:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.008554 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.008888 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.009203 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.009285 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.009428 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:52Z","lastTransitionTime":"2026-01-29T06:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.071710 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 03:30:36.09643169 +0000 UTC Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.117939 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.118066 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.117984 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:52 crc kubenswrapper[4861]: E0129 06:35:52.118303 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.118791 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.118853 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.118869 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.118892 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.118913 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:52Z","lastTransitionTime":"2026-01-29T06:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:52 crc kubenswrapper[4861]: E0129 06:35:52.118972 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:52 crc kubenswrapper[4861]: E0129 06:35:52.118779 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.120284 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:35:52 crc kubenswrapper[4861]: E0129 06:35:52.120616 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.221915 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.221963 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.221975 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.222002 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.222014 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:52Z","lastTransitionTime":"2026-01-29T06:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.324941 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.324983 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.324997 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.325017 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.325031 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:52Z","lastTransitionTime":"2026-01-29T06:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.427775 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.427824 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.427835 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.427857 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.427871 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:52Z","lastTransitionTime":"2026-01-29T06:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.531119 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.531197 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.531214 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.531241 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.531260 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:52Z","lastTransitionTime":"2026-01-29T06:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.634291 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.634356 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.634374 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.634400 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.634419 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:52Z","lastTransitionTime":"2026-01-29T06:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.737323 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.737399 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.737412 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.737429 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.737442 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:52Z","lastTransitionTime":"2026-01-29T06:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.841064 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.841534 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.841718 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.841863 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.841995 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:52Z","lastTransitionTime":"2026-01-29T06:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.945334 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.945378 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.945389 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.945406 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:52 crc kubenswrapper[4861]: I0129 06:35:52.945416 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:52Z","lastTransitionTime":"2026-01-29T06:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.048770 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.048821 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.048834 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.048853 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.048865 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:53Z","lastTransitionTime":"2026-01-29T06:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.072415 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 09:33:18.265331424 +0000 UTC
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.152424 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.152989 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.153174 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.153340 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.153465 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:53Z","lastTransitionTime":"2026-01-29T06:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.256889 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.256966 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.256990 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.257017 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.257040 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:53Z","lastTransitionTime":"2026-01-29T06:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.360576 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.360634 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.360647 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.360669 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.360687 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:53Z","lastTransitionTime":"2026-01-29T06:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.464039 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.464098 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.464114 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.464134 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.464146 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:53Z","lastTransitionTime":"2026-01-29T06:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.567254 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.567321 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.567339 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.567365 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.567383 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:53Z","lastTransitionTime":"2026-01-29T06:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.669658 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.669832 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.669864 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.669890 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.669911 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:53Z","lastTransitionTime":"2026-01-29T06:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.773488 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.773560 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.773581 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.773607 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.773626 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:53Z","lastTransitionTime":"2026-01-29T06:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.801860 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:35:53 crc kubenswrapper[4861]: E0129 06:35:53.802120 4861 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 29 06:35:53 crc kubenswrapper[4861]: E0129 06:35:53.802294 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs podName:fb22f8f6-1210-4f39-8712-d33efc26239c nodeName:}" failed. No retries permitted until 2026-01-29 06:36:01.802255659 +0000 UTC m=+53.473750256 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs") pod "network-metrics-daemon-rh69l" (UID: "fb22f8f6-1210-4f39-8712-d33efc26239c") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.877218 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.877313 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.877341 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.877388 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.877417 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:53Z","lastTransitionTime":"2026-01-29T06:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
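The "durationBeforeRetry 8s" in the nestedpendingoperations record above is the tell-tale of a doubling backoff: mount retries for the unresolvable metrics-certs secret are spaced out exponentially. A minimal sketch of that policy follows; the 500ms floor and roughly two-minute cap are assumptions modelled on kubelet volume-manager defaults, not values taken from this log.

package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the previous delay, clamped to a floor and a cap.
// An 8s delay corresponds to the fifth consecutive failure under these
// assumed constants (500ms, 1s, 2s, 4s, 8s, ...).
func nextBackoff(d time.Duration) time.Duration {
	const (
		floor   = 500 * time.Millisecond
		ceiling = 2*time.Minute + 2*time.Second
	)
	if d < floor {
		return floor
	}
	if d = d * 2; d > ceiling {
		return ceiling
	}
	return d
}

func main() {
	var d time.Duration
	for attempt := 1; attempt <= 8; attempt++ {
		d = nextBackoff(d)
		fmt.Printf("attempt %d: wait %v before retrying\n", attempt, d)
	}
}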
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.981773 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.981856 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.981885 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.981922 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:53 crc kubenswrapper[4861]: I0129 06:35:53.981949 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:53Z","lastTransitionTime":"2026-01-29T06:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.072952 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 06:30:55.304279558 +0000 UTC
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.085144 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.085231 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.085248 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.085273 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.085292 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:54Z","lastTransitionTime":"2026-01-29T06:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.115912 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.116024 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.116159 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:35:54 crc kubenswrapper[4861]: E0129 06:35:54.116047 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:35:54 crc kubenswrapper[4861]: E0129 06:35:54.116265 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:35:54 crc kubenswrapper[4861]: E0129 06:35:54.116454 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.116537 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:35:54 crc kubenswrapper[4861]: E0129 06:35:54.116673 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.189389 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.189495 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.189527 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.189565 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.189603 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:54Z","lastTransitionTime":"2026-01-29T06:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.293671 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.293749 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.293769 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.293799 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.293818 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:54Z","lastTransitionTime":"2026-01-29T06:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.396940 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.397021 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.397043 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.397075 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.397127 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:54Z","lastTransitionTime":"2026-01-29T06:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.500202 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.500522 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.500960 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.501355 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.501695 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:54Z","lastTransitionTime":"2026-01-29T06:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.605600 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.605709 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.605729 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.605756 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.605779 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:54Z","lastTransitionTime":"2026-01-29T06:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.708812 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.708877 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.708898 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.708928 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.708952 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:54Z","lastTransitionTime":"2026-01-29T06:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.812452 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.812504 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.812521 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.812541 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.812555 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:54Z","lastTransitionTime":"2026-01-29T06:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.914941 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.915009 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.915027 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.915054 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:54 crc kubenswrapper[4861]: I0129 06:35:54.915102 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:54Z","lastTransitionTime":"2026-01-29T06:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.018450 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.018780 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.018897 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.019016 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.019158 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:55Z","lastTransitionTime":"2026-01-29T06:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.073473 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 12:05:33.525937308 +0000 UTC
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.123152 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.123552 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.123722 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.123933 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.124195 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:55Z","lastTransitionTime":"2026-01-29T06:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.228124 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.228213 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.228236 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.228272 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.228297 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:55Z","lastTransitionTime":"2026-01-29T06:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
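The kubelet-serving certificate lines above print a different rotation deadline on every pass even though the expiry never changes; that is expected, because the deadline is re-drawn from a jittered window late in the certificate's lifetime. A sketch of that draw, assuming client-go's 0.7 + 0.3*rand split; only the expiry appears in the log, so the issue date below is back-solved and purely illustrative.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point in the last ~30% of the
// certificate's lifetime, which is why repeated log passes show
// deadlines scattered across Nov 2025 - Feb 2026 for one expiry.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.3*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// notBefore is an assumption; the log only shows the expiry.
	notBefore := time.Date(2025, time.March, 11, 5, 53, 3, 0, time.UTC)
	notAfter := time.Date(2026, time.February, 24, 5, 53, 3, 0, time.UTC)
	for i := 0; i < 3; i++ {
		fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
	}
}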
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.330992 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.331058 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.331123 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.331157 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.331202 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:55Z","lastTransitionTime":"2026-01-29T06:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.435375 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.435443 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.435464 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.435492 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.435510 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:55Z","lastTransitionTime":"2026-01-29T06:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.539050 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.539178 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.539201 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.539230 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.539251 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:55Z","lastTransitionTime":"2026-01-29T06:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.642993 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.643050 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.643099 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.643128 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.643148 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:55Z","lastTransitionTime":"2026-01-29T06:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.745846 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.746213 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.746400 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.746551 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.746688 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:55Z","lastTransitionTime":"2026-01-29T06:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.850368 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.850428 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.850447 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.850470 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.850488 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:55Z","lastTransitionTime":"2026-01-29T06:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.954983 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.955067 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.955123 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.955153 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:55 crc kubenswrapper[4861]: I0129 06:35:55.955171 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:55Z","lastTransitionTime":"2026-01-29T06:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.058189 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.058273 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.058306 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.058338 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.058365 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.073978 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 21:19:11.715185385 +0000 UTC
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.115433 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.115423 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.115545 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.115592 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:35:56 crc kubenswrapper[4861]: E0129 06:35:56.115804 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:35:56 crc kubenswrapper[4861]: E0129 06:35:56.115954 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:35:56 crc kubenswrapper[4861]: E0129 06:35:56.116167 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:35:56 crc kubenswrapper[4861]: E0129 06:35:56.116298 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.161782 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.161855 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.161877 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.161907 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.161929 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.265395 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.265544 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.265570 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.265603 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.265626 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.369776 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.369843 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.369861 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.369887 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.369904 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.463725 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.463800 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.463824 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.463854 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.463872 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:56 crc kubenswrapper[4861]: E0129 06:35:56.486755 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:56Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.492658 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.492722 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.492739 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.492765 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.492784 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:56 crc kubenswrapper[4861]: E0129 06:35:56.516064 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:56Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.522256 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.522322 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.522343 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.522374 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.522394 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:56 crc kubenswrapper[4861]: E0129 06:35:56.544436 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:56Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.550538 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.550621 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.550642 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.550668 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.550689 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:56 crc kubenswrapper[4861]: E0129 06:35:56.572260 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:56Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.578850 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.578902 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.578961 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.578985 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.579036 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:56 crc kubenswrapper[4861]: E0129 06:35:56.600615 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:56Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:56 crc kubenswrapper[4861]: E0129 06:35:56.600849 4861 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.603369 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
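All five patch attempts in this burst (06:35:56.49 through 06:35:56.60) fail the same way: the node-status patch has to pass the validating webhook node.network-node-identity.openshift.io served at https://127.0.0.1:9743, and that webhook's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2026-01-29T06:35:56Z, so TLS verification fails before the API server can apply anything; after the fifth failure the kubelet gives up with "update node status exceeds retry count". The failing validity check can be reproduced off to the side. The sketch below is not anything the kubelet runs; it assumes the third-party cryptography package, that it executes on the node where 127.0.0.1:9743 is reachable, and that the webhook completes a handshake without a client certificate.

# Minimal sketch: fetch the webhook's serving certificate and compare its
# notAfter against the current clock, mirroring the "x509: certificate has
# expired" failure recorded in the log above.
import ssl
from datetime import datetime, timezone

from cryptography import x509  # third-party dependency; an assumption here

HOST, PORT = "127.0.0.1", 9743  # taken from the failing webhook URL in the log

# With no CA bundle, get_server_certificate skips verification, so an expired
# certificate can still be fetched; a server that demands a client certificate
# may still abort the handshake.
pem = ssl.get_server_certificate((HOST, PORT))
cert = x509.load_pem_x509_certificate(pem.encode())

now = datetime.now(timezone.utc)
not_after = cert.not_valid_after_utc  # cryptography >= 42; older: not_valid_after
print("subject:  ", cert.subject.rfc4514_string())
print("notBefore:", cert.not_valid_before_utc)
print("notAfter: ", not_after)
if now > not_after:
    # Same shape as the kubelet error: "current time ... is after ..."
    print(f"expired: current time {now:%Y-%m-%dT%H:%M:%SZ} "
          f"is after {not_after:%Y-%m-%dT%H:%M:%SZ}")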
event="NodeHasSufficientMemory" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.603430 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.603454 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.603484 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.603507 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.707287 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.707769 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.708281 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.708532 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.708753 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.812872 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.812956 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.812981 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.813014 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.813036 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.915816 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.915895 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.915917 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.915946 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:56 crc kubenswrapper[4861]: I0129 06:35:56.915966 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:56Z","lastTransitionTime":"2026-01-29T06:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.019534 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.019603 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.019621 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.019644 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.019661 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:57Z","lastTransitionTime":"2026-01-29T06:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.074821 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 08:43:10.192917109 +0000 UTC Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.122257 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.122319 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.122337 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.122362 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.122379 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:57Z","lastTransitionTime":"2026-01-29T06:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.227026 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.227498 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.227698 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.227876 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.228017 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:57Z","lastTransitionTime":"2026-01-29T06:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.331977 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.332605 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.332896 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.333260 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.333560 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:57Z","lastTransitionTime":"2026-01-29T06:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.436916 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.437342 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.437442 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.437537 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.437628 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:57Z","lastTransitionTime":"2026-01-29T06:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.540047 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.540589 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.540680 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.540763 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.540840 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:57Z","lastTransitionTime":"2026-01-29T06:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.643612 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.643677 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.643690 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.643711 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.643724 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:57Z","lastTransitionTime":"2026-01-29T06:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.747501 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.747578 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.747589 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.747606 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.747619 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:57Z","lastTransitionTime":"2026-01-29T06:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.851144 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.851208 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.851279 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.851302 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.851322 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:57Z","lastTransitionTime":"2026-01-29T06:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.955803 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.955886 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.955909 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.955941 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:57 crc kubenswrapper[4861]: I0129 06:35:57.955965 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:57Z","lastTransitionTime":"2026-01-29T06:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.059297 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.059368 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.059412 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.059448 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.059469 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:58Z","lastTransitionTime":"2026-01-29T06:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.075784 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 14:14:51.774736947 +0000 UTC Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.116320 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.116384 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:35:58 crc kubenswrapper[4861]: E0129 06:35:58.116512 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.116575 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:35:58 crc kubenswrapper[4861]: E0129 06:35:58.116776 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:35:58 crc kubenswrapper[4861]: E0129 06:35:58.116858 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.117018 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:35:58 crc kubenswrapper[4861]: E0129 06:35:58.117262 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.163657 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.163752 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.163771 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.163826 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.163843 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:58Z","lastTransitionTime":"2026-01-29T06:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.267315 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.267375 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.267395 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.267419 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.267436 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:58Z","lastTransitionTime":"2026-01-29T06:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.371118 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.371467 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.371646 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.371894 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.372135 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:58Z","lastTransitionTime":"2026-01-29T06:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.475280 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.475327 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.475341 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.475361 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.475376 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:58Z","lastTransitionTime":"2026-01-29T06:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.577977 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.578033 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.578052 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.578105 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.578123 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:58Z","lastTransitionTime":"2026-01-29T06:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.681283 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.681350 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.681374 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.681400 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.681419 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:58Z","lastTransitionTime":"2026-01-29T06:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.785491 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.785549 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.785561 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.785579 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.785591 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:58Z","lastTransitionTime":"2026-01-29T06:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.889187 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.889282 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.889321 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.889355 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.889382 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:58Z","lastTransitionTime":"2026-01-29T06:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.993032 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.993112 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.993126 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.993145 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:58 crc kubenswrapper[4861]: I0129 06:35:58.993156 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:58Z","lastTransitionTime":"2026-01-29T06:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.077119 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 04:08:54.580452827 +0000 UTC Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.096720 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.096825 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.096845 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.096869 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.096887 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:59Z","lastTransitionTime":"2026-01-29T06:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.135433 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.160473 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.174642 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.194884 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.207483 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.207559 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.207577 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.207602 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.207620 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:59Z","lastTransitionTime":"2026-01-29T06:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.215415 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.235378 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.250512 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.272271 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.291865 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.309931 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.311356 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.311593 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.311794 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.312066 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.312398 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:59Z","lastTransitionTime":"2026-01-29T06:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.328007 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.356778 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"\\\\nI0129 06:35:46.904426 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904435 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904439 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nF0129 06:35:46.904440 6316 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z]\\\\nI0129 06:35:46.904459 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-ope\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.373985 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.395103 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.414678 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.415997 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.416042 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:59 crc kubenswrapper[4861]: 
I0129 06:35:59.416062 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.416116 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.416134 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:59Z","lastTransitionTime":"2026-01-29T06:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.434846 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuberne
tes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.519512 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.519583 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.519597 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.519615 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.519629 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:59Z","lastTransitionTime":"2026-01-29T06:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.622659 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.622721 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.622742 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.622767 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.622785 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:59Z","lastTransitionTime":"2026-01-29T06:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.726698 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.726767 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.726789 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.726842 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.726862 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:59Z","lastTransitionTime":"2026-01-29T06:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.829965 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.830024 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.830042 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.830065 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.830112 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:59Z","lastTransitionTime":"2026-01-29T06:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.933607 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.933659 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.933676 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.933701 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:35:59 crc kubenswrapper[4861]: I0129 06:35:59.933718 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:35:59Z","lastTransitionTime":"2026-01-29T06:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.042796 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.042879 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.042902 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.042932 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.042959 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:00Z","lastTransitionTime":"2026-01-29T06:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.077335 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 12:16:40.88483188 +0000 UTC Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.116560 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.116836 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.117607 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.117613 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.118194 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.117810 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.118700 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.119147 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.147329 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.147400 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.147425 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.147461 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.147634 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:00Z","lastTransitionTime":"2026-01-29T06:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.251556 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.251609 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.251629 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.251661 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.251684 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:00Z","lastTransitionTime":"2026-01-29T06:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.354058 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.354145 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.354166 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.354194 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.354212 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:00Z","lastTransitionTime":"2026-01-29T06:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.457064 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.457173 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.457193 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.457224 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.457245 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:00Z","lastTransitionTime":"2026-01-29T06:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.559892 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.559981 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.560005 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.560032 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.560053 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:00Z","lastTransitionTime":"2026-01-29T06:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.663009 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.663125 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.663152 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.663181 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.663245 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:00Z","lastTransitionTime":"2026-01-29T06:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.766401 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.766457 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.766474 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.766521 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.766540 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:00Z","lastTransitionTime":"2026-01-29T06:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.869472 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.869542 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.869561 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.869591 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.869614 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:00Z","lastTransitionTime":"2026-01-29T06:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.892960 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.893186 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:36:32.893152556 +0000 UTC m=+84.564647143 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.973022 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.973124 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.973150 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.973184 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.973208 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:00Z","lastTransitionTime":"2026-01-29T06:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.994873 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.994967 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.995037 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:00 crc kubenswrapper[4861]: I0129 06:36:00.995131 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995191 4861 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995234 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995280 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:36:32.995252365 +0000 UTC m=+84.666746962 (durationBeforeRetry 32s). 
Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995280 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:36:32.995252365 +0000 UTC m=+84.666746962 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995297 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995330 4861 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995337 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995372 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995394 4861 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995391 4861 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995433 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 06:36:32.995395248 +0000 UTC m=+84.666889895 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995476 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 06:36:32.99545787 +0000 UTC m=+84.666952577 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:36:00 crc kubenswrapper[4861]: E0129 06:36:00.995508 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:36:32.995492041 +0000 UTC m=+84.666986708 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.076715 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.076790 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.076815 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.076844 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.076872 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:01Z","lastTransitionTime":"2026-01-29T06:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.077537 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 06:02:19.291181621 +0000 UTC Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.180417 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.180475 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.180493 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.180519 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.180539 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:01Z","lastTransitionTime":"2026-01-29T06:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.283489 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.283553 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.283570 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.283595 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.283613 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:01Z","lastTransitionTime":"2026-01-29T06:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.386921 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.387033 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.387054 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.387160 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.387188 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:01Z","lastTransitionTime":"2026-01-29T06:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.490428 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.490553 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.490573 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.490643 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.490662 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:01Z","lastTransitionTime":"2026-01-29T06:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.594149 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.594588 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.594760 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.594857 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.594945 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:01Z","lastTransitionTime":"2026-01-29T06:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.698372 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.698439 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.698459 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.698484 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.698503 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:01Z","lastTransitionTime":"2026-01-29T06:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.801716 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.801793 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.801816 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.801846 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.801871 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:01Z","lastTransitionTime":"2026-01-29T06:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.803713 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:01 crc kubenswrapper[4861]: E0129 06:36:01.803895 4861 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:36:01 crc kubenswrapper[4861]: E0129 06:36:01.803983 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs podName:fb22f8f6-1210-4f39-8712-d33efc26239c nodeName:}" failed. No retries permitted until 2026-01-29 06:36:17.803958546 +0000 UTC m=+69.475453143 (durationBeforeRetry 16s). 
Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.905208 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.905285 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.905316 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.905346 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:01 crc kubenswrapper[4861]: I0129 06:36:01.905370 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:01Z","lastTransitionTime":"2026-01-29T06:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.008469 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.008534 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.008554 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.008595 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
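Every NodeNotReady flap in this log traces back to a single condition: nothing matching a CNI configuration exists in /etc/kubernetes/cni/net.d/ until ovnkube-controller (crash-looping further down) writes one. A minimal standalone check of the same directory the log names; the glob patterns are the conventional CNI extensions, not lifted from CRI-O's actual loader:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	var found []string
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		m, _ := filepath.Glob(filepath.Join(confDir, pat))
		found = append(found, m...)
	}
	if len(found) == 0 {
		fmt.Println("no CNI configuration file in", confDir, "- network plugin not ready")
		return
	}
	fmt.Println("CNI configs:", found)
}
```

The moment a config file lands there, the runtime reports NetworkReady=true and the Ready condition stops flapping.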
Has your network provider started?"} Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.078553 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 07:46:37.252922046 +0000 UTC Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.108800 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.112724 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.112786 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.112804 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.112831 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.112849 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:02Z","lastTransitionTime":"2026-01-29T06:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.115953 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.115995 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:02 crc kubenswrapper[4861]: E0129 06:36:02.116171 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.115974 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:02 crc kubenswrapper[4861]: E0129 06:36:02.116356 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.116423 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:02 crc kubenswrapper[4861]: E0129 06:36:02.116728 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:02 crc kubenswrapper[4861]: E0129 06:36:02.116616 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.129155 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.133014 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: 
I0129 06:36:02.154109 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.186192 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"\\\\nI0129 06:35:46.904426 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904435 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904439 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nF0129 06:35:46.904440 6316 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z]\\\\nI0129 06:35:46.904459 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-ope\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.202872 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.215715 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.215783 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.215802 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.215828 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.215848 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:02Z","lastTransitionTime":"2026-01-29T06:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.220466 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.241868 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.265736 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.291110 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.314558 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.318997 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.319118 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.319146 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.319182 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.319207 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:02Z","lastTransitionTime":"2026-01-29T06:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.333588 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.347335 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.367650 4861 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee9
51468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.390725 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.411006 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.423028 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.423116 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.423142 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.423176 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.423204 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:02Z","lastTransitionTime":"2026-01-29T06:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.431212 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.453668 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:02Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.526117 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.526182 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.526192 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.526224 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.526234 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:02Z","lastTransitionTime":"2026-01-29T06:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.629486 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.629559 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.629570 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.629625 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.629647 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:02Z","lastTransitionTime":"2026-01-29T06:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.732932 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.733575 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.733784 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.733950 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.734288 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:02Z","lastTransitionTime":"2026-01-29T06:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.837421 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.837475 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.837495 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.837520 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.837540 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:02Z","lastTransitionTime":"2026-01-29T06:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.940609 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.941035 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.941161 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.941266 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:02 crc kubenswrapper[4861]: I0129 06:36:02.941368 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:02Z","lastTransitionTime":"2026-01-29T06:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
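[annotation] Each setters.go:603 entry above is the kubelet stamping the node's Ready condition False because no CNI configuration exists yet in /etc/kubernetes/cni/net.d/. A sketch of the condition object being recorded, built with the upstream k8s.io/api types; the field values are copied from the log, the timestamp is one representative heartbeat.

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	now := metav1.NewTime(time.Date(2026, 1, 29, 6, 36, 2, 0, time.UTC))
	// The Ready=False condition as it appears in the "Node became not ready" entries.
	cond := corev1.NodeCondition{
		Type:               corev1.NodeReady,
		Status:             corev1.ConditionFalse,
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
	fmt.Printf("%+v\n", cond)
}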
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.044387 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.044956 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.045173 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.045340 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.045474 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:03Z","lastTransitionTime":"2026-01-29T06:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.079336 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 19:03:11.45302738 +0000 UTC
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.117321 4861 scope.go:117] "RemoveContainer" containerID="69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.149425 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.149764 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.149934 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.150140 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.150416 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:03Z","lastTransitionTime":"2026-01-29T06:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
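[annotation] The certificate_manager.go:356 entry above shows the kubelet-serving certificate's rotation deadline (2026-01-15) already lies in the past relative to the node clock, so rotation is overdue. A rough sketch of how such a deadline can be derived from the certificate lifetime; client-go's certificate manager jitters the threshold to roughly 70-90% of the NotBefore..NotAfter window, and both the exact fraction and the assumed NotBefore below are approximations, not the verbatim upstream code.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a jittered point late in the certificate's
// lifetime, after which the manager attempts to rotate.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jitter := 0.7 + 0.2*rand.Float64() // assumed 70-90% window
	return notBefore.Add(time.Duration(float64(total) * jitter))
}

func main() {
	// NotAfter is taken from the log; NotBefore assumes a one-year lifetime.
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
	notBefore := notAfter.AddDate(-1, 0, 0)
	fmt.Println(rotationDeadline(notBefore, notAfter))
}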
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.255007 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.255452 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.255690 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.255968 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.256907 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:03Z","lastTransitionTime":"2026-01-29T06:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.362798 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.362859 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.362881 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.362913 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.362933 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:03Z","lastTransitionTime":"2026-01-29T06:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.466358 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.466425 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.466443 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.466468 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.466485 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:03Z","lastTransitionTime":"2026-01-29T06:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
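[annotation] The status bodies the kubelet keeps trying to send (the "failed to patch status" entries before and after this point) are strategic merge patches: the $setElementOrder/conditions directive pins the ordering of the conditions list while individual entries are merged by their "type" key. A minimal, self-contained sketch of applying such a patch with the upstream apimachinery helper; the pod content here is invented for illustration. In this log the merge never runs, because the API server first consults the pod.network-node-identity.openshift.io admission webhook, whose certificate has expired.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	original := []byte(`{"status":{"conditions":[` +
		`{"type":"Ready","status":"False"},{"type":"Initialized","status":"True"}]}}`)
	patch := []byte(`{"status":{` +
		`"$setElementOrder/conditions":[{"type":"Initialized"},{"type":"Ready"}],` +
		`"conditions":[{"type":"Ready","status":"True"}]}}`)
	// Conditions merge by their "type" key; the directive reorders the list.
	merged, err := strategicpatch.StrategicMergePatch(original, patch, corev1.Pod{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged))
}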
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.551050 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/1.log"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.555456 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed"}
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.556437 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.569441 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.569503 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.569522 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.569549 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.569571 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:03Z","lastTransitionTime":"2026-01-29T06:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.578577 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.598516 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.630273 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"\\\\nI0129 06:35:46.904426 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904435 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904439 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nF0129 06:35:46.904440 6316 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z]\\\\nI0129 06:35:46.904459 6316 obj_retry.go:303] Retry object setup: *v1.Pod 
openshift-machine-config-ope\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:36:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initConta
inerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.648798 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.667510 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 
06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.672191 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.672259 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.672277 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.672307 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.672329 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:03Z","lastTransitionTime":"2026-01-29T06:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.691316 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.708572 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.766746 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints 
registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.774508 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.774545 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.774555 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.774573 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:03 crc kubenswrapper[4861]: 
I0129 06:36:03.774584 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:03Z","lastTransitionTime":"2026-01-29T06:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.788821 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf
86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.802831 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.815950 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.825826 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.837359 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.849619 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.862298 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.872965 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.876594 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.876633 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.876645 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.876666 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.876680 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:03Z","lastTransitionTime":"2026-01-29T06:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.886817 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:03Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.979232 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.979812 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.979903 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.979973 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:03 crc kubenswrapper[4861]: I0129 06:36:03.980040 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:03Z","lastTransitionTime":"2026-01-29T06:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.080657 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 05:22:36.08285937 +0000 UTC Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.083052 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.083116 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.083133 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.083151 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.083167 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:04Z","lastTransitionTime":"2026-01-29T06:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.115538 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.115595 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.115602 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.115628 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:04 crc kubenswrapper[4861]: E0129 06:36:04.115725 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:04 crc kubenswrapper[4861]: E0129 06:36:04.115838 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:04 crc kubenswrapper[4861]: E0129 06:36:04.115970 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:04 crc kubenswrapper[4861]: E0129 06:36:04.116147 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.186918 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.187188 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.187328 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.187467 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.187602 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:04Z","lastTransitionTime":"2026-01-29T06:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.291235 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.291572 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.291772 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.291974 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.292208 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:04Z","lastTransitionTime":"2026-01-29T06:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.396566 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.396615 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.396668 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.396691 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.396711 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:04Z","lastTransitionTime":"2026-01-29T06:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.501060 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.501200 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.501223 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.501249 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.501269 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:04Z","lastTransitionTime":"2026-01-29T06:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.562511 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/2.log" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.564214 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/1.log" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.568950 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed" exitCode=1 Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.569013 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed"} Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.569126 4861 scope.go:117] "RemoveContainer" containerID="69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.570244 4861 scope.go:117] "RemoveContainer" containerID="a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed" Jan 29 06:36:04 crc kubenswrapper[4861]: E0129 06:36:04.570539 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.588306 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.604509 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.604562 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.604584 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.604610 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.604632 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:04Z","lastTransitionTime":"2026-01-29T06:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.610513 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.627249 4861 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.649052 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.666688 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.682010 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc 
kubenswrapper[4861]: I0129 06:36:04.701285 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\
\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.707246 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.707367 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.707390 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.707416 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.707434 4861 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:04Z","lastTransitionTime":"2026-01-29T06:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.718301 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.735001 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.750956 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.768611 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.789226 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controll
er-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.808204 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.810372 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.810429 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.810448 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.810476 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.810494 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:04Z","lastTransitionTime":"2026-01-29T06:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.828415 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.858978 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69f9abaf17d0f3e7f8135f9261a4434b8005ab9017c592f14435ca0c68032ca7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"message\\\":\\\"\\\\nI0129 06:35:46.904426 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904435 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf\\\\nI0129 06:35:46.904439 6316 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nF0129 06:35:46.904440 6316 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:35:46Z is after 2025-08-24T17:21:41Z]\\\\nI0129 06:35:46.904459 6316 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-ope\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:04Z\\\",\\\"message\\\":\\\":Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none 
reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0129 06:36:04.169253 6534 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI0129 06:36:04.170863 6534 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI0129 06:36:04.170874 6534 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI0129 06:36:04.169323 6534 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nF0129 06:36:04.169439 6534 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.874406 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.891922 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:04Z is after 2025-08-24T17:21:41Z" Jan 29 
06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.914226 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.914587 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.914743 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.914899 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:04 crc kubenswrapper[4861]: I0129 06:36:04.915062 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:04Z","lastTransitionTime":"2026-01-29T06:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.018849 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.018916 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.018934 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.018986 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.019006 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:05Z","lastTransitionTime":"2026-01-29T06:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.080890 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 05:22:39.883451258 +0000 UTC
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.120959 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.120997 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.121005 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.121037 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.121048 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:05Z","lastTransitionTime":"2026-01-29T06:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.223652 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.223775 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.223799 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.223830 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.223854 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:05Z","lastTransitionTime":"2026-01-29T06:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.327513 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.327602 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.327625 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.327656 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.327678 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:05Z","lastTransitionTime":"2026-01-29T06:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.431750 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.432272 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.432415 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.432554 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.432738 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:05Z","lastTransitionTime":"2026-01-29T06:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.537397 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.537490 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.537518 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.537556 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.537581 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:05Z","lastTransitionTime":"2026-01-29T06:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.575821 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/2.log" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.584195 4861 scope.go:117] "RemoveContainer" containerID="a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed" Jan 29 06:36:05 crc kubenswrapper[4861]: E0129 06:36:05.584435 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.615390 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.640904 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.640955 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 
06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.640965 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.640981 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.640990 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:05Z","lastTransitionTime":"2026-01-29T06:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.670508 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPat
h\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.691133 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196d
dfd2f5650c587bd32ba046ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:04Z\\\",\\\"message\\\":\\\":Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0129 06:36:04.169253 6534 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI0129 06:36:04.170863 6534 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI0129 06:36:04.170874 6534 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI0129 06:36:04.169323 6534 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nF0129 06:36:04.169439 6534 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.701814 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.714849 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.727410 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.743631 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.743674 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.743684 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.743701 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.743715 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:05Z","lastTransitionTime":"2026-01-29T06:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.744930 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.762813 4861 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.778182 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.793021 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.808230 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc 
kubenswrapper[4861]: I0129 06:36:05.825638 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\
\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.844907 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.846820 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.846858 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.846871 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.846889 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.846903 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:05Z","lastTransitionTime":"2026-01-29T06:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.863354 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.876697 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.897046 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.910957 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:05Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.950731 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.950791 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.950818 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.950848 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:05 crc kubenswrapper[4861]: I0129 06:36:05.950872 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:05Z","lastTransitionTime":"2026-01-29T06:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.054179 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.054278 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.054295 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.054318 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.054335 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.082851 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 15:49:27.041570429 +0000 UTC Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.116209 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.116314 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.116209 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.116419 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:06 crc kubenswrapper[4861]: E0129 06:36:06.116395 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:06 crc kubenswrapper[4861]: E0129 06:36:06.116539 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:06 crc kubenswrapper[4861]: E0129 06:36:06.116630 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:06 crc kubenswrapper[4861]: E0129 06:36:06.116756 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.157402 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.157473 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.157491 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.157511 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.157526 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.261406 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.261478 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.261501 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.261534 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.261559 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.396903 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.396980 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.397007 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.397038 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.397063 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.500194 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.500250 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.500262 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.500276 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.500285 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.603057 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.603164 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.603183 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.603214 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.603240 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.698913 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.699430 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.699581 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.699775 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.699906 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: E0129 06:36:06.724731 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:06Z is after 
2025-08-24T17:21:41Z" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.729461 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.729491 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.729503 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.729520 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.729533 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: E0129 06:36:06.755056 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:06Z is after 
2025-08-24T17:21:41Z" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.762138 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.762196 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.762214 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.762239 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.762258 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: E0129 06:36:06.787112 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:06Z is after 
2025-08-24T17:21:41Z" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.792700 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.792904 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.793041 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.793222 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.793427 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: E0129 06:36:06.816241 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:06Z is after 
2025-08-24T17:21:41Z" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.821995 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.822264 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.822464 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.822624 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.822770 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: E0129 06:36:06.845921 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:06Z is after 
2025-08-24T17:21:41Z" Jan 29 06:36:06 crc kubenswrapper[4861]: E0129 06:36:06.846196 4861 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.848677 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.848734 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.848758 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.848791 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.848813 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.952650 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.952715 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.952733 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.952756 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:06 crc kubenswrapper[4861]: I0129 06:36:06.952773 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:06Z","lastTransitionTime":"2026-01-29T06:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.055983 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.056036 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.056049 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.056067 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.056102 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:07Z","lastTransitionTime":"2026-01-29T06:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.083102 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 22:36:46.428892525 +0000 UTC Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.158953 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.158994 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.159005 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.159020 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.159031 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:07Z","lastTransitionTime":"2026-01-29T06:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.262475 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.262536 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.262556 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.262580 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.262598 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:07Z","lastTransitionTime":"2026-01-29T06:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.365713 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.365897 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.365980 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.366014 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.366037 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:07Z","lastTransitionTime":"2026-01-29T06:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.469822 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.469888 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.469905 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.469929 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.469949 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:07Z","lastTransitionTime":"2026-01-29T06:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.572934 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.572986 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.572999 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.573021 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.573034 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:07Z","lastTransitionTime":"2026-01-29T06:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.676178 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.676221 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.676230 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.676248 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.676257 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:07Z","lastTransitionTime":"2026-01-29T06:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.779456 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.779526 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.779547 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.779575 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.779595 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:07Z","lastTransitionTime":"2026-01-29T06:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.882621 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.882695 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.882713 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.882746 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.882765 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:07Z","lastTransitionTime":"2026-01-29T06:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.987344 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.987420 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.987441 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.987470 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:07 crc kubenswrapper[4861]: I0129 06:36:07.987491 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:07Z","lastTransitionTime":"2026-01-29T06:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.084103 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 11:23:34.678147069 +0000 UTC Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.091044 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.091144 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.091168 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.091194 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.091213 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:08Z","lastTransitionTime":"2026-01-29T06:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.115636 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.115771 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.115846 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:08 crc kubenswrapper[4861]: E0129 06:36:08.116059 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.116170 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:08 crc kubenswrapper[4861]: E0129 06:36:08.116235 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:08 crc kubenswrapper[4861]: E0129 06:36:08.116352 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:08 crc kubenswrapper[4861]: E0129 06:36:08.116437 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.194595 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.194650 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.194668 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.194694 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.194712 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:08Z","lastTransitionTime":"2026-01-29T06:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.299748 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.302054 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.302119 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.302150 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.302170 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:08Z","lastTransitionTime":"2026-01-29T06:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.408061 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.408155 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.408174 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.408201 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.408225 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:08Z","lastTransitionTime":"2026-01-29T06:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.511541 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.511616 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.511636 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.511665 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.511688 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:08Z","lastTransitionTime":"2026-01-29T06:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.614187 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.614246 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.614259 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.614279 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.614293 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:08Z","lastTransitionTime":"2026-01-29T06:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.717311 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.717389 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.717451 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.717479 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.717498 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:08Z","lastTransitionTime":"2026-01-29T06:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.821249 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.821326 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.821353 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.821386 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.821408 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:08Z","lastTransitionTime":"2026-01-29T06:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.924529 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.924597 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.924616 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.924643 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:08 crc kubenswrapper[4861]: I0129 06:36:08.924662 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:08Z","lastTransitionTime":"2026-01-29T06:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.027682 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.027743 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.027760 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.027785 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.027804 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:09Z","lastTransitionTime":"2026-01-29T06:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.084658 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 23:21:22.288120667 +0000 UTC Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.129686 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.129787 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.129805 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.129829 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.129841 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:09Z","lastTransitionTime":"2026-01-29T06:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.139410 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.160727 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.178178 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.196947 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.219908 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 
2025-08-24T17:21:41Z"
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.234331 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.234388 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.234408 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.234435 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.234453 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:09Z","lastTransitionTime":"2026-01-29T06:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.238687 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e
95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.254980 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.274374 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.294931 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.314294 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.330987 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z"
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.337675 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.337738 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.337757 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.337782 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.337800 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:09Z","lastTransitionTime":"2026-01-29T06:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.357244 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.376478 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.397332 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.427738 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:04Z\\\",\\\"message\\\":\\\":Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0129 06:36:04.169253 6534 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI0129 06:36:04.170863 6534 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI0129 06:36:04.170874 6534 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI0129 06:36:04.169323 6534 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nF0129 06:36:04.169439 6534 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.441489 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.441547 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.441565 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.441594 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.441614 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:09Z","lastTransitionTime":"2026-01-29T06:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.445571 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.459800 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:09Z is after 2025-08-24T17:21:41Z" Jan 29 
06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.544626 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.544686 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.544704 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.544728 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.544745 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:09Z","lastTransitionTime":"2026-01-29T06:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.648398 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.648471 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.648491 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.648521 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.648538 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:09Z","lastTransitionTime":"2026-01-29T06:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.752610 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.753219 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.753232 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.753251 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.753269 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:09Z","lastTransitionTime":"2026-01-29T06:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.856521 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.856592 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.856612 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.856639 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.856658 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:09Z","lastTransitionTime":"2026-01-29T06:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.959614 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.959703 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.959729 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.959790 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:09 crc kubenswrapper[4861]: I0129 06:36:09.959818 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:09Z","lastTransitionTime":"2026-01-29T06:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.062802 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.062872 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.062896 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.062926 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.062949 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:10Z","lastTransitionTime":"2026-01-29T06:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.085632 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 07:32:54.27871704 +0000 UTC Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.116060 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.116144 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.116281 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:10 crc kubenswrapper[4861]: E0129 06:36:10.116278 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.116360 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:10 crc kubenswrapper[4861]: E0129 06:36:10.116476 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:10 crc kubenswrapper[4861]: E0129 06:36:10.116560 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:10 crc kubenswrapper[4861]: E0129 06:36:10.116738 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.165832 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.165908 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.165927 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.165951 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.165968 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:10Z","lastTransitionTime":"2026-01-29T06:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.268703 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.268781 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.268798 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.268821 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.268838 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:10Z","lastTransitionTime":"2026-01-29T06:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.371840 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.371896 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.371909 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.371932 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.371944 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:10Z","lastTransitionTime":"2026-01-29T06:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.475843 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.475893 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.475906 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.475922 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.475933 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:10Z","lastTransitionTime":"2026-01-29T06:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.578733 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.579106 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.579194 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.579326 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.579420 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:10Z","lastTransitionTime":"2026-01-29T06:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.683155 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.683744 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.683907 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.684046 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.684258 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:10Z","lastTransitionTime":"2026-01-29T06:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.788710 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.789620 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.789771 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.789916 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.790216 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:10Z","lastTransitionTime":"2026-01-29T06:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.895269 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.895373 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.895394 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.895433 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.895456 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:10Z","lastTransitionTime":"2026-01-29T06:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.999367 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.999448 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.999470 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.999501 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:10 crc kubenswrapper[4861]: I0129 06:36:10.999537 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:10Z","lastTransitionTime":"2026-01-29T06:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.086240 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 22:01:10.254638429 +0000 UTC Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.102551 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.102591 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.102604 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.102623 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.102635 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:11Z","lastTransitionTime":"2026-01-29T06:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.206442 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.206584 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.206608 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.206634 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.206651 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:11Z","lastTransitionTime":"2026-01-29T06:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.309977 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.310036 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.310054 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.310106 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.310126 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:11Z","lastTransitionTime":"2026-01-29T06:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.413603 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.413668 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.413685 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.413709 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.413729 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:11Z","lastTransitionTime":"2026-01-29T06:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.516551 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.516613 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.516634 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.516658 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.516676 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:11Z","lastTransitionTime":"2026-01-29T06:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.619468 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.619561 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.619583 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.619681 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.619705 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:11Z","lastTransitionTime":"2026-01-29T06:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.723660 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.723738 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.723759 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.723792 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.723814 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:11Z","lastTransitionTime":"2026-01-29T06:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.827183 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.827252 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.827274 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.827304 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:11 crc kubenswrapper[4861]: I0129 06:36:11.827325 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:11Z","lastTransitionTime":"2026-01-29T06:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:12 crc kubenswrapper[4861]: I0129 06:36:12.086373 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 16:12:14.793433703 +0000 UTC
Jan 29 06:36:12 crc kubenswrapper[4861]: I0129 06:36:12.116475 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:36:12 crc kubenswrapper[4861]: I0129 06:36:12.116519 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:36:12 crc kubenswrapper[4861]: I0129 06:36:12.116523 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:36:12 crc kubenswrapper[4861]: I0129 06:36:12.116485 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:36:12 crc kubenswrapper[4861]: E0129 06:36:12.116765 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:36:12 crc kubenswrapper[4861]: E0129 06:36:12.116825 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:36:12 crc kubenswrapper[4861]: E0129 06:36:12.116637 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:36:12 crc kubenswrapper[4861]: E0129 06:36:12.116904 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:36:13 crc kubenswrapper[4861]: I0129 06:36:13.087474 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 05:58:58.101194966 +0000 UTC
Jan 29 06:36:14 crc kubenswrapper[4861]: I0129 06:36:14.088633 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 11:03:05.888116674 +0000 UTC
Jan 29 06:36:14 crc kubenswrapper[4861]: I0129 06:36:14.116055 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:36:14 crc kubenswrapper[4861]: I0129 06:36:14.116103 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:36:14 crc kubenswrapper[4861]: I0129 06:36:14.116243 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:36:14 crc kubenswrapper[4861]: I0129 06:36:14.116550 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:36:14 crc kubenswrapper[4861]: E0129 06:36:14.116721 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:36:14 crc kubenswrapper[4861]: E0129 06:36:14.116844 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:36:14 crc kubenswrapper[4861]: E0129 06:36:14.116967 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:36:14 crc kubenswrapper[4861]: E0129 06:36:14.117032 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:36:15 crc kubenswrapper[4861]: I0129 06:36:15.089809 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 20:14:27.348026326 +0000 UTC
Jan 29 06:36:16 crc kubenswrapper[4861]: I0129 06:36:16.090103 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 01:07:24.436734746 +0000 UTC
Jan 29 06:36:16 crc kubenswrapper[4861]: I0129 06:36:16.116001 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:36:16 crc kubenswrapper[4861]: I0129 06:36:16.116030 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:36:16 crc kubenswrapper[4861]: I0129 06:36:16.116058 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:36:16 crc kubenswrapper[4861]: I0129 06:36:16.116106 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:36:16 crc kubenswrapper[4861]: E0129 06:36:16.116157 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:36:16 crc kubenswrapper[4861]: E0129 06:36:16.116310 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:36:16 crc kubenswrapper[4861]: E0129 06:36:16.116493 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:36:16 crc kubenswrapper[4861]: E0129 06:36:16.116632 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.069062 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.069121 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.069137 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.069152 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.069163 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.090591 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 17:52:27.650982828 +0000 UTC Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.172318 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.172419 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.172437 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.172456 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.172469 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.177828 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.177907 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.177955 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.177984 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.178001 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: E0129 06:36:17.202004 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:17Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.207550 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.207614 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.207635 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.207661 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.207682 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: E0129 06:36:17.225667 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:17Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.228852 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.228890 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.228901 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.228917 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.228927 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: E0129 06:36:17.242581 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:17Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.246714 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.246762 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.246773 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.246793 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.246808 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: E0129 06:36:17.257749 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:17Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.263632 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.263677 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.263692 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.263714 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.263728 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: E0129 06:36:17.281510 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:17Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:17 crc kubenswrapper[4861]: E0129 06:36:17.281621 4861 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.283292 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.283315 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.283325 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.283342 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.283353 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.386296 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.386361 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.386382 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.386417 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.386438 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.489958 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.490121 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.490143 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.490172 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.490192 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.593872 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.593995 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.594019 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.594049 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.594095 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.696286 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.696368 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.696390 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.696415 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.696433 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.799562 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.799613 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.799625 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.799645 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.799656 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.828467 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:17 crc kubenswrapper[4861]: E0129 06:36:17.828670 4861 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:36:17 crc kubenswrapper[4861]: E0129 06:36:17.828748 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs podName:fb22f8f6-1210-4f39-8712-d33efc26239c nodeName:}" failed. No retries permitted until 2026-01-29 06:36:49.82872839 +0000 UTC m=+101.500222937 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs") pod "network-metrics-daemon-rh69l" (UID: "fb22f8f6-1210-4f39-8712-d33efc26239c") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.903067 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.903141 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.903155 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.903178 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:17 crc kubenswrapper[4861]: I0129 06:36:17.903195 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:17Z","lastTransitionTime":"2026-01-29T06:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.006387 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.006456 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.006477 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.006504 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.006524 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:18Z","lastTransitionTime":"2026-01-29T06:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.091492 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 06:40:02.542825 +0000 UTC Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.109842 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.109906 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.109926 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.109951 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.109971 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:18Z","lastTransitionTime":"2026-01-29T06:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.115434 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.115493 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.115496 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.115575 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:18 crc kubenswrapper[4861]: E0129 06:36:18.115766 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:18 crc kubenswrapper[4861]: E0129 06:36:18.116264 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:18 crc kubenswrapper[4861]: E0129 06:36:18.116374 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:18 crc kubenswrapper[4861]: E0129 06:36:18.116799 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.117084 4861 scope.go:117] "RemoveContainer" containerID="a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed" Jan 29 06:36:18 crc kubenswrapper[4861]: E0129 06:36:18.117284 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.213390 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.213495 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.213512 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.213548 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.213566 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:18Z","lastTransitionTime":"2026-01-29T06:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.317105 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.317166 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.317180 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.317204 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.317220 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:18Z","lastTransitionTime":"2026-01-29T06:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.420447 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.420499 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.420510 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.420528 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.420538 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:18Z","lastTransitionTime":"2026-01-29T06:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.524506 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.524757 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.524847 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.524917 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.524973 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:18Z","lastTransitionTime":"2026-01-29T06:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.627902 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.627957 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.627970 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.627989 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.628004 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:18Z","lastTransitionTime":"2026-01-29T06:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.730760 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.730866 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.730886 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.730922 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.730941 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:18Z","lastTransitionTime":"2026-01-29T06:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.834351 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.834414 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.834433 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.834460 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.834479 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:18Z","lastTransitionTime":"2026-01-29T06:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.937829 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.937923 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.937941 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.937972 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:18 crc kubenswrapper[4861]: I0129 06:36:18.937993 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:18Z","lastTransitionTime":"2026-01-29T06:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.041419 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.041469 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.041479 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.041496 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.041508 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:19Z","lastTransitionTime":"2026-01-29T06:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.092176 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 07:03:38.569462444 +0000 UTC Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.141495 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":
\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.146431 4861 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.146485 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.146503 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.146532 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.146552 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:19Z","lastTransitionTime":"2026-01-29T06:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.160179 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T0
6:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.179914 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.195940 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.211520 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.235596 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.249014 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.249057 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.249089 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.249111 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.249128 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:19Z","lastTransitionTime":"2026-01-29T06:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.255450 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.272121 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.284774 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.303067 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}
,{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.318953 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.332287 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.352382 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.352490 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.352510 4861 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.352537 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.352555 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:19Z","lastTransitionTime":"2026-01-29T06:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.363129 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196d
dfd2f5650c587bd32ba046ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:04Z\\\",\\\"message\\\":\\\":Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0129 06:36:04.169253 6534 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI0129 06:36:04.170863 6534 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI0129 06:36:04.170874 6534 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI0129 06:36:04.169323 6534 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nF0129 06:36:04.169439 6534 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.377637 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.391111 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.404030 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.427199 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:19Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.455682 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.455733 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.455746 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.455766 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.455783 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:19Z","lastTransitionTime":"2026-01-29T06:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.559632 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.559681 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.559691 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.559708 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.559722 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:19Z","lastTransitionTime":"2026-01-29T06:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.662495 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.662597 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.662621 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.662653 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.662678 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:19Z","lastTransitionTime":"2026-01-29T06:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.766856 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.766959 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.767008 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.767039 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.767064 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:19Z","lastTransitionTime":"2026-01-29T06:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.870296 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.870499 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.870574 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.870647 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.870712 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:19Z","lastTransitionTime":"2026-01-29T06:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.973017 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.973098 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.973112 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.973133 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:19 crc kubenswrapper[4861]: I0129 06:36:19.973146 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:19Z","lastTransitionTime":"2026-01-29T06:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.076957 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.077013 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.077025 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.077043 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.077055 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:20Z","lastTransitionTime":"2026-01-29T06:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.093355 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 15:20:41.158968872 +0000 UTC Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.115745 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.115775 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:20 crc kubenswrapper[4861]: E0129 06:36:20.115865 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.115800 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.115799 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:20 crc kubenswrapper[4861]: E0129 06:36:20.115992 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:20 crc kubenswrapper[4861]: E0129 06:36:20.116148 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:20 crc kubenswrapper[4861]: E0129 06:36:20.116256 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.179653 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.179691 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.179702 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.179716 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.179728 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:20Z","lastTransitionTime":"2026-01-29T06:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.282424 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.282477 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.282489 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.282508 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.282524 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:20Z","lastTransitionTime":"2026-01-29T06:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.384579 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.384638 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.384660 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.384688 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.384713 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:20Z","lastTransitionTime":"2026-01-29T06:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.487409 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.487504 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.487534 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.487610 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.487641 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:20Z","lastTransitionTime":"2026-01-29T06:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.590932 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.591025 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.591039 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.591059 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.591093 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:20Z","lastTransitionTime":"2026-01-29T06:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.693626 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.693675 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.693688 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.693707 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.693718 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:20Z","lastTransitionTime":"2026-01-29T06:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.796208 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.796596 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.796716 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.796819 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.796928 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:20Z","lastTransitionTime":"2026-01-29T06:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.900520 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.900562 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.900572 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.900588 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:20 crc kubenswrapper[4861]: I0129 06:36:20.900599 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:20Z","lastTransitionTime":"2026-01-29T06:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.003388 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.003431 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.003444 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.003460 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.003473 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:21Z","lastTransitionTime":"2026-01-29T06:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.093852 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 13:34:40.213299591 +0000 UTC Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.106262 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.106335 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.106355 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.106384 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.106409 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:21Z","lastTransitionTime":"2026-01-29T06:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.209960 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.210314 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.210382 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.210450 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.210510 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:21Z","lastTransitionTime":"2026-01-29T06:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.314620 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.314694 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.314714 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.314742 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.314763 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:21Z","lastTransitionTime":"2026-01-29T06:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.417829 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.417909 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.417926 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.417951 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.417973 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:21Z","lastTransitionTime":"2026-01-29T06:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.520667 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.520711 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.520728 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.520748 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.520764 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:21Z","lastTransitionTime":"2026-01-29T06:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.623441 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.623501 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.623519 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.623545 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.623563 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:21Z","lastTransitionTime":"2026-01-29T06:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.640802 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4942p_da8019d1-2d2c-493d-b80f-1d566eec9475/kube-multus/0.log" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.640864 4861 generic.go:334] "Generic (PLEG): container finished" podID="da8019d1-2d2c-493d-b80f-1d566eec9475" containerID="740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8" exitCode=1 Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.640901 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4942p" event={"ID":"da8019d1-2d2c-493d-b80f-1d566eec9475","Type":"ContainerDied","Data":"740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8"} Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.641405 4861 scope.go:117] "RemoveContainer" containerID="740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.660299 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.679574 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.693300 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.711651 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:21Z\\\",\\\"message\\\":\\\"2026-01-29T06:35:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d\\\\n2026-01-29T06:35:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d to /host/opt/cni/bin/\\\\n2026-01-29T06:35:36Z [verbose] multus-daemon started\\\\n2026-01-29T06:35:36Z [verbose] Readiness Indicator file check\\\\n2026-01-29T06:36:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.727432 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.727548 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.727581 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.727592 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.727612 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.727627 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:21Z","lastTransitionTime":"2026-01-29T06:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.745876 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.760608 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.783667 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:04Z\\\",\\\"message\\\":\\\":Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0129 06:36:04.169253 6534 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI0129 06:36:04.170863 6534 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI0129 06:36:04.170874 6534 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI0129 06:36:04.169323 6534 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nF0129 06:36:04.169439 6534 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.795198 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.807193 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.821357 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.831008 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.831049 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.831061 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.831103 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.831118 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:21Z","lastTransitionTime":"2026-01-29T06:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.847744 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.863181 4861 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.880460 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.897965 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.909034 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc 
kubenswrapper[4861]: I0129 06:36:21.923383 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\
\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:21Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.934389 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.934536 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.934550 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.934568 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:21 crc kubenswrapper[4861]: I0129 06:36:21.934583 4861 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:21Z","lastTransitionTime":"2026-01-29T06:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.036812 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.036841 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.036849 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.036862 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.036881 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:22Z","lastTransitionTime":"2026-01-29T06:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.095374 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 23:46:52.514844596 +0000 UTC Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.116388 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.116432 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:22 crc kubenswrapper[4861]: E0129 06:36:22.116507 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:22 crc kubenswrapper[4861]: E0129 06:36:22.116616 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.116688 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:22 crc kubenswrapper[4861]: E0129 06:36:22.116920 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.117146 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:22 crc kubenswrapper[4861]: E0129 06:36:22.117325 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.139369 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.139412 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.139425 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.139444 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.139456 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:22Z","lastTransitionTime":"2026-01-29T06:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.242421 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.242471 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.242482 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.242502 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.242519 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:22Z","lastTransitionTime":"2026-01-29T06:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.344997 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.345169 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.345199 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.345229 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.345249 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:22Z","lastTransitionTime":"2026-01-29T06:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.449429 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.449506 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.449531 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.449571 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.449598 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:22Z","lastTransitionTime":"2026-01-29T06:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.553230 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.553315 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.553335 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.553364 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.553385 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:22Z","lastTransitionTime":"2026-01-29T06:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.646953 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4942p_da8019d1-2d2c-493d-b80f-1d566eec9475/kube-multus/0.log" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.647061 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4942p" event={"ID":"da8019d1-2d2c-493d-b80f-1d566eec9475","Type":"ContainerStarted","Data":"3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2"} Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.656423 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.656486 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.656504 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.656529 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.656548 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:22Z","lastTransitionTime":"2026-01-29T06:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.671295 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.690891 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.718162 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:04Z\\\",\\\"message\\\":\\\":Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0129 06:36:04.169253 6534 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI0129 06:36:04.170863 6534 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI0129 06:36:04.170874 6534 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI0129 06:36:04.169323 6534 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nF0129 06:36:04.169439 6534 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.729552 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.742211 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.757259 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.760740 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.760767 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.760776 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.760796 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.760807 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:22Z","lastTransitionTime":"2026-01-29T06:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.781938 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.800827 4861 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\
\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.818123 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.839485 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 
2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.853109 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.863771 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.863820 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.863838 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.863867 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.863887 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:22Z","lastTransitionTime":"2026-01-29T06:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.866501 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.885236 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.900714 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.916693 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.930602 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.944516 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:21Z\\\",\\\"message\\\":\\\"2026-01-29T06:35:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d\\\\n2026-01-29T06:35:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d to /host/opt/cni/bin/\\\\n2026-01-29T06:35:36Z [verbose] multus-daemon started\\\\n2026-01-29T06:35:36Z [verbose] Readiness Indicator file check\\\\n2026-01-29T06:36:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:36:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:22Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.968463 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.968795 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.968946 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.969051 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:22 crc kubenswrapper[4861]: I0129 06:36:22.969212 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:22Z","lastTransitionTime":"2026-01-29T06:36:22Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.073205 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.073266 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.073284 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.073308 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.073327 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:23Z","lastTransitionTime":"2026-01-29T06:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.095778 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 14:29:39.719974238 +0000 UTC Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.176739 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.176802 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.176822 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.176847 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.176865 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:23Z","lastTransitionTime":"2026-01-29T06:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.279454 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.279541 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.279567 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.279599 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:23 crc kubenswrapper[4861]: I0129 06:36:23.279624 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:23Z","lastTransitionTime":"2026-01-29T06:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[the identical five-entry status cycle (NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady, "Node became not ready") repeats at 06:36:23.382, 06:36:23.486, 06:36:23.589, 06:36:23.692, 06:36:23.796, 06:36:23.899 and 06:36:24.003]
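The condition={...} payload that setters.go:603 prints in each cycle is the JSON form of the node's Ready condition. A minimal, self-contained Go sketch that decodes one of these payloads; the struct is a hand-rolled stand-in mirroring the fields visible in the log, not the real k8s.io/api/core/v1.NodeCondition type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// NodeCondition mirrors the fields visible in the logged payload
// (a stand-in for k8s.io/api/core/v1.NodeCondition).
type NodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	// Payload copied from the setters.go:603 entry above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:23Z","lastTransitionTime":"2026-01-29T06:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`

	var cond NodeCondition
	if err := json.Unmarshal([]byte(raw), &cond); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s since %s: %s\n", cond.Type, cond.Status,
		cond.LastTransitionTime.Format(time.RFC3339), cond.Reason)
}
```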
Jan 29 06:36:24 crc kubenswrapper[4861]: I0129 06:36:24.095977 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 23:23:53.892238271 +0000 UTC
[same status cycle repeats at 06:36:24.106]
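Each certificate_manager.go:356 entry in this section reports the same expiration (2026-02-24) but a different rotation deadline (2025-11-09, then 2025-11-14, 2025-11-17, 2025-12-02), because the deadline is re-jittered on every pass. A sketch of that computation under stated assumptions: the 70-90% band and the 30-day issue window below are illustrative approximations of client-go's certificate manager, not the exact upstream constants:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a jittered point late in the certificate's
// validity window. The 70-90% band is an assumption approximating
// client-go's certificate manager.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	fraction := 0.7 + 0.2*rand.Float64() // uniform in [0.7, 0.9)
	return notBefore.Add(time.Duration(float64(total) * fraction))
}

func main() {
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC) // expiry from the log
	notBefore := notAfter.Add(-30 * 24 * time.Hour)           // assumed issue time
	// Each call yields a different deadline, which is why successive
	// certificate_manager entries in this log print different values.
	for i := 0; i < 3; i++ {
		fmt.Println(rotationDeadline(notBefore, notAfter))
	}
}
```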
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:24 crc kubenswrapper[4861]: E0129 06:36:24.116446 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:24 crc kubenswrapper[4861]: E0129 06:36:24.116528 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:24 crc kubenswrapper[4861]: I0129 06:36:24.210597 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:24 crc kubenswrapper[4861]: I0129 06:36:24.210770 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:24 crc kubenswrapper[4861]: I0129 06:36:24.210793 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:24 crc kubenswrapper[4861]: I0129 06:36:24.210825 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:24 crc kubenswrapper[4861]: I0129 06:36:24.210849 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:24Z","lastTransitionTime":"2026-01-29T06:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:24 crc kubenswrapper[4861]: I0129 06:36:24.314529 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:24 crc kubenswrapper[4861]: I0129 06:36:24.314587 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:24 crc kubenswrapper[4861]: I0129 06:36:24.314603 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:24 crc kubenswrapper[4861]: I0129 06:36:24.314658 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:24 crc kubenswrapper[4861]: I0129 06:36:24.314679 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:24Z","lastTransitionTime":"2026-01-29T06:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
[same status cycle repeats at 06:36:24.419, 06:36:24.522, 06:36:24.625, 06:36:24.729, 06:36:24.833 and 06:36:24.938]
[same status cycle repeats at 06:36:25.041]
Jan 29 06:36:25 crc kubenswrapper[4861]: I0129 06:36:25.097012 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 08:12:55.744644325 +0000 UTC
[same status cycle repeats at 06:36:25.145]
[same status cycle repeats at 06:36:25.249, 06:36:25.353, 06:36:25.456, 06:36:25.560, 06:36:25.663, 06:36:25.767, 06:36:25.870, 06:36:25.974 and 06:36:26.078]
Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.098064 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 05:15:57.947941624 +0000 UTC
Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.115741 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.115780 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.115839 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.115923 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:36:26 crc kubenswrapper[4861]: E0129 06:36:26.116040 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:36:26 crc kubenswrapper[4861]: E0129 06:36:26.116305 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:36:26 crc kubenswrapper[4861]: E0129 06:36:26.116441 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:36:26 crc kubenswrapper[4861]: E0129 06:36:26.116649 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.181560 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.181631 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.181653 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.181681 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.181702 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:26Z","lastTransitionTime":"2026-01-29T06:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.284536 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.284609 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.284627 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.284655 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:26 crc kubenswrapper[4861]: I0129 06:36:26.284675 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:26Z","lastTransitionTime":"2026-01-29T06:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
[same status cycle repeats at 06:36:26.388, 06:36:26.492, 06:36:26.596, 06:36:26.699, 06:36:26.802 and 06:36:26.906]
[same status cycle repeats at 06:36:27.009]
Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.098853 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 02:42:48.734788218 +0000 UTC
[same status cycle repeats at 06:36:27.112]
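The failed node-status update recorded below shows the kubelet PATCHing a strategic-merge document whose conditions list carries a $setElementOrder directive. A minimal sketch that assembles the same shape with plain maps; the shape is copied from the logged patch body, while the real kubelet builds it through the strategic-merge-patch machinery rather than by hand:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Shape mirrors the patch body in the failed update below:
	// $setElementOrder pins condition ordering for the strategic merge.
	patch := map[string]any{
		"status": map[string]any{
			"$setElementOrder/conditions": []map[string]string{
				{"type": "MemoryPressure"},
				{"type": "DiskPressure"},
				{"type": "PIDPressure"},
				{"type": "Ready"},
			},
			"conditions": []map[string]string{
				{
					"type":              "Ready",
					"status":            "False",
					"reason":            "KubeletNotReady",
					"lastHeartbeatTime": "2026-01-29T06:36:27Z",
				},
			},
		},
	}
	body, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```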
[same status cycle repeats at 06:36:27.215 and 06:36:27.318]
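Both status-patch attempts that follow are rejected in the node-identity webhook's TLS handshake: its serving certificate expired 2025-08-24T17:21:41Z, while the node clock reads 2026-01-29. A self-contained sketch of the same validity check using a synthetic self-signed certificate with the NotAfter from the error; in the real failure the standard x509 verifier performs this check during the handshake to https://127.0.0.1:9743:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Synthetic certificate carrying the NotAfter seen in the webhook error.
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "node.network-node-identity.openshift.io"},
		NotBefore:    time.Date(2025, 2, 24, 17, 21, 41, 0, time.UTC), // assumed issue time
		NotAfter:     time.Date(2025, 8, 24, 17, 21, 41, 0, time.UTC), // expiry from the log
	}
	der, _ := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	cert, _ := x509.ParseCertificate(der)

	// The validity check the TLS verifier applies, at the log's clock time.
	now := time.Date(2026, 1, 29, 6, 36, 27, 0, time.UTC)
	if now.After(cert.NotAfter) {
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	}
}
```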
[same status cycle repeats at 06:36:27.422 and 06:36:27.525]
Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.610421 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.610487 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.610505 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.610532 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.610549 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:27Z","lastTransitionTime":"2026-01-29T06:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:27 crc kubenswrapper[4861]: E0129 06:36:27.630576 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:27Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.635602 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.635668 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.635686 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.635711 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.635730 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:27Z","lastTransitionTime":"2026-01-29T06:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:27 crc kubenswrapper[4861]: E0129 06:36:27.652441 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:27Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.657199 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.657259 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.657280 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.657305 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.657322 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:27Z","lastTransitionTime":"2026-01-29T06:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:27 crc kubenswrapper[4861]: E0129 06:36:27.682951 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:27Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.688676 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.688738 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.688756 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.688783 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.688801 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:27Z","lastTransitionTime":"2026-01-29T06:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:27 crc kubenswrapper[4861]: E0129 06:36:27.714881 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:27Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.719039 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.719401 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.719677 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.719881 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.719993 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:27Z","lastTransitionTime":"2026-01-29T06:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:27 crc kubenswrapper[4861]: E0129 06:36:27.734021 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:27Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:27 crc kubenswrapper[4861]: E0129 06:36:27.734518 4861 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.736317 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
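Every retry above is rejected for the same reason: the serving certificate behind the node.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-29. A minimal diagnostic sketch in Go (kubelet's own language), assuming shell access on the node: it dials the webhook address taken from the log (127.0.0.1:9743) and prints the serving certificate's validity window. This is illustrative editor-written code, not kubelet source.

```go
// Sketch: confirm the x509 "certificate has expired" failure independently
// by inspecting the webhook's serving certificate.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Address taken from the failing Post in the log: https://127.0.0.1:9743/node
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		// Verification is skipped on purpose so the handshake completes even
		// though the certificate is expired; we only want to inspect it.
		InsecureSkipVerify: true,
	})
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	certs := conn.ConnectionState().PeerCertificates
	if len(certs) == 0 {
		log.Fatal("no peer certificate presented")
	}
	cert := certs[0]
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.UTC().Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
	fmt.Printf("expired:   %v\n", time.Now().UTC().After(cert.NotAfter))
}
```

If notAfter prints 2025-08-24T17:21:41Z, the webhook certificate simply needs to be rotated; until it is, every node-status patch will keep failing exactly as logged above, and the kubelet will keep exhausting its retry count.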
event="NodeHasSufficientMemory" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.736440 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.736558 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.736648 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.736736 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:27Z","lastTransitionTime":"2026-01-29T06:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.838684 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.838988 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.839099 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.839173 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.839229 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:27Z","lastTransitionTime":"2026-01-29T06:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.941892 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.941962 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.941981 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.942012 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:27 crc kubenswrapper[4861]: I0129 06:36:27.942032 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:27Z","lastTransitionTime":"2026-01-29T06:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.045349 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.045415 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.045433 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.045466 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.045486 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:28Z","lastTransitionTime":"2026-01-29T06:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.099655 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 13:45:00.444492134 +0000 UTC Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.115411 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.115453 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.115429 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.115653 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:28 crc kubenswrapper[4861]: E0129 06:36:28.115911 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:28 crc kubenswrapper[4861]: E0129 06:36:28.116013 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:28 crc kubenswrapper[4861]: E0129 06:36:28.116165 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:28 crc kubenswrapper[4861]: E0129 06:36:28.116288 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.148488 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.148532 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.148547 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.148566 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.148582 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:28Z","lastTransitionTime":"2026-01-29T06:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.251273 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.251335 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.251349 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.251371 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.251383 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:28Z","lastTransitionTime":"2026-01-29T06:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.354014 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.354096 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.354109 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.354127 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.354139 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:28Z","lastTransitionTime":"2026-01-29T06:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.456741 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.456788 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.456797 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.456811 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.456824 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:28Z","lastTransitionTime":"2026-01-29T06:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.559560 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.559627 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.559648 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.559678 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.559699 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:28Z","lastTransitionTime":"2026-01-29T06:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.662567 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.662710 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.662730 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.662757 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.662777 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:28Z","lastTransitionTime":"2026-01-29T06:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.766360 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.766424 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.766442 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.766465 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.766483 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:28Z","lastTransitionTime":"2026-01-29T06:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.869930 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.869992 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.870005 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.870024 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.870038 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:28Z","lastTransitionTime":"2026-01-29T06:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.972935 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.972997 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.973015 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.973040 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:28 crc kubenswrapper[4861]: I0129 06:36:28.973062 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:28Z","lastTransitionTime":"2026-01-29T06:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.077276 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.077379 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.077405 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.077438 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.077465 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:29Z","lastTransitionTime":"2026-01-29T06:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.100902 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 09:59:02.405335065 +0000 UTC Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.138141 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.160809 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.175490 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.180295 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.180354 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.180373 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.180404 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.180422 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:29Z","lastTransitionTime":"2026-01-29T06:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.192797 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\
"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.211842 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.236863 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 
2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.253484 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.275306 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:21Z\\\",\\\"message\\\":\\\"2026-01-29T06:35:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d\\\\n2026-01-29T06:35:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d to /host/opt/cni/bin/\\\\n2026-01-29T06:35:36Z [verbose] multus-daemon started\\\\n2026-01-29T06:35:36Z [verbose] Readiness Indicator file check\\\\n2026-01-29T06:36:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:36:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.282731 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.282787 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.282806 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.282830 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.282848 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:29Z","lastTransitionTime":"2026-01-29T06:36:29Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.292379 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\
\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.308590 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.328367 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.344256 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.361950 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.382580 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 
06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.385964 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.386028 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.386050 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.386095 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.386110 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:29Z","lastTransitionTime":"2026-01-29T06:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.400461 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.418833 4861 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.448212 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:04Z\\\",\\\"message\\\":\\\":Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0129 06:36:04.169253 6534 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI0129 06:36:04.170863 6534 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI0129 06:36:04.170874 6534 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI0129 06:36:04.169323 6534 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nF0129 06:36:04.169439 6534 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:29Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.489379 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.489430 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.489449 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.489475 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.489494 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:29Z","lastTransitionTime":"2026-01-29T06:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.593013 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.593132 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.593158 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.593190 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.593215 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:29Z","lastTransitionTime":"2026-01-29T06:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.695863 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.695956 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.695981 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.696014 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.696042 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:29Z","lastTransitionTime":"2026-01-29T06:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.798942 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.799022 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.799048 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.799112 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.799139 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:29Z","lastTransitionTime":"2026-01-29T06:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.902337 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.902405 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.902425 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.902511 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:29 crc kubenswrapper[4861]: I0129 06:36:29.902540 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:29Z","lastTransitionTime":"2026-01-29T06:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.005926 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.006000 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.006029 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.006061 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.006145 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:30Z","lastTransitionTime":"2026-01-29T06:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.101716 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 00:51:31.690544604 +0000 UTC Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.108627 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.108668 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.108682 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.108698 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.108711 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:30Z","lastTransitionTime":"2026-01-29T06:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.115734 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.115822 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:30 crc kubenswrapper[4861]: E0129 06:36:30.115840 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.115748 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.115958 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:30 crc kubenswrapper[4861]: E0129 06:36:30.116206 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:30 crc kubenswrapper[4861]: E0129 06:36:30.116242 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:30 crc kubenswrapper[4861]: E0129 06:36:30.116293 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.211061 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.211160 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.211179 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.211206 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.211231 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:30Z","lastTransitionTime":"2026-01-29T06:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.314716 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.314782 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.314799 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.314825 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.314844 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:30Z","lastTransitionTime":"2026-01-29T06:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.418353 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.418408 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.418426 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.418452 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.418471 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:30Z","lastTransitionTime":"2026-01-29T06:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.521560 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.521648 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.521672 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.521705 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.521727 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:30Z","lastTransitionTime":"2026-01-29T06:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.627999 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.628156 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.628183 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.628209 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.628234 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:30Z","lastTransitionTime":"2026-01-29T06:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.731374 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.731434 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.731452 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.731475 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.731506 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:30Z","lastTransitionTime":"2026-01-29T06:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.835257 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.835312 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.835331 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.835356 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.835374 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:30Z","lastTransitionTime":"2026-01-29T06:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.938211 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.938307 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.938335 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.938377 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:30 crc kubenswrapper[4861]: I0129 06:36:30.938402 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:30Z","lastTransitionTime":"2026-01-29T06:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.041466 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.041531 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.041552 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.041576 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.041592 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:31Z","lastTransitionTime":"2026-01-29T06:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.102756 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 08:03:30.132487324 +0000 UTC Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.117458 4861 scope.go:117] "RemoveContainer" containerID="a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.144868 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.144929 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.144947 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.144972 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.144993 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:31Z","lastTransitionTime":"2026-01-29T06:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.247053 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.247185 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.247211 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.247243 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.247271 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:31Z","lastTransitionTime":"2026-01-29T06:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.350461 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.350819 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.350837 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.350863 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.350880 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:31Z","lastTransitionTime":"2026-01-29T06:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.454327 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.454586 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.454655 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.454720 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.454778 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:31Z","lastTransitionTime":"2026-01-29T06:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.557947 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.557999 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.558016 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.558044 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.558061 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:31Z","lastTransitionTime":"2026-01-29T06:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.660871 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.661142 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.661268 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.661362 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.661468 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:31Z","lastTransitionTime":"2026-01-29T06:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.687999 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/2.log" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.691526 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.692289 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.721508 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.750169 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.764174 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.764229 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.764248 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.764273 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.764290 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:31Z","lastTransitionTime":"2026-01-29T06:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.768376 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.779603 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.793220 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 
2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.803230 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.814349 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.832230 4861 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83
e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.848465 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.866204 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.867059 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.867135 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.867154 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.867179 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.867196 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:31Z","lastTransitionTime":"2026-01-29T06:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.877677 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.894279 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:21Z\\\",\\\"message\\\":\\\"2026-01-29T06:35:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d\\\\n2026-01-29T06:35:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d to /host/opt/cni/bin/\\\\n2026-01-29T06:35:36Z [verbose] multus-daemon started\\\\n2026-01-29T06:35:36Z [verbose] Readiness Indicator file check\\\\n2026-01-29T06:36:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:36:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.907698 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 
06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.917608 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.929699 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.957950 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:04Z\\\",\\\"message\\\":\\\":Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0129 06:36:04.169253 6534 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI0129 06:36:04.170863 6534 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI0129 06:36:04.170874 6534 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI0129 06:36:04.169323 6534 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nF0129 06:36:04.169439 6534 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:36:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.969842 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.969887 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.969901 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.969920 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.969931 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:31Z","lastTransitionTime":"2026-01-29T06:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:31 crc kubenswrapper[4861]: I0129 06:36:31.976875 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:31Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.072430 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.072491 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.072508 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.072533 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.072551 4861 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:32Z","lastTransitionTime":"2026-01-29T06:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.103404 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 02:02:48.239030692 +0000 UTC
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.116368 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.116385 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.116497 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:36:32 crc kubenswrapper[4861]: E0129 06:36:32.116505 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:36:32 crc kubenswrapper[4861]: E0129 06:36:32.116577 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:36:32 crc kubenswrapper[4861]: E0129 06:36:32.116718 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.117028 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:36:32 crc kubenswrapper[4861]: E0129 06:36:32.117312 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.175545 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.175576 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.175584 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.175597 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.175606 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:32Z","lastTransitionTime":"2026-01-29T06:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.279289 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.279375 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.279395 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.279430 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.279450 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:32Z","lastTransitionTime":"2026-01-29T06:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.383454 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.383531 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.383556 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.383622 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.383641 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:32Z","lastTransitionTime":"2026-01-29T06:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.486397 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.486458 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.486470 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.486494 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.486515 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:32Z","lastTransitionTime":"2026-01-29T06:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.589787 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.589846 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.589859 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.589879 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.589892 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:32Z","lastTransitionTime":"2026-01-29T06:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.693676 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.693745 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.693762 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.693788 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.693806 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:32Z","lastTransitionTime":"2026-01-29T06:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.698871 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/3.log"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.700124 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/2.log"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.704654 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a" exitCode=1
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.704711 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"}
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.704798 4861 scope.go:117] "RemoveContainer" containerID="a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.706235 4861 scope.go:117] "RemoveContainer" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"
Jan 29 06:36:32 crc kubenswrapper[4861]: E0129 06:36:32.706613 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73"
Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.731170 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.778553 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.797170 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.797311 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.797334 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.797359 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.797379 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:32Z","lastTransitionTime":"2026-01-29T06:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.804118 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.823048 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.840890 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.861143 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.881896 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.901708 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.901756 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.901777 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.901802 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.901820 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:32Z","lastTransitionTime":"2026-01-29T06:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.907057 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.910895 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:36:32 crc kubenswrapper[4861]: E0129 06:36:32.911329 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.911292622 +0000 UTC m=+148.582787219 (durationBeforeRetry 1m4s). 
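[editor's note] The "No retries permitted until ... (durationBeforeRetry 1m4s)" figure above is consistent with an exponential backoff that doubles the wait after each failed volume operation. A minimal sketch of that schedule; the initial delay, factor, and cap below are assumptions chosen to reproduce the observed 1m4s, not constants read from kubelet source.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond          // assumed initial backoff
	maxDelay := 2*time.Minute + 2*time.Second // assumed cap
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: wait %v before retrying\n", attempt, delay)
		delay *= 2 // double on every consecutive failure
		if delay > maxDelay {
			delay = maxDelay
		}
	}
	// Attempt 8 prints 1m4s, matching the durationBeforeRetry in the log.
}
```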
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.925872 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.948673 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:21Z\\\",\\\"message\\\":\\\"2026-01-29T06:35:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d\\\\n2026-01-29T06:35:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d to /host/opt/cni/bin/\\\\n2026-01-29T06:35:36Z [verbose] multus-daemon started\\\\n2026-01-29T06:35:36Z [verbose] Readiness Indicator file check\\\\n2026-01-29T06:36:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:36:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.968172 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 
06:36:32 crc kubenswrapper[4861]: I0129 06:36:32.989197 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.007317 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.007370 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.007389 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.007417 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.007435 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:33Z","lastTransitionTime":"2026-01-29T06:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
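[editor's note] Every status patch in this stretch fails for the same reason: the network-node-identity webhook's serving certificate expired on 2025-08-24 while the node clock reads 2026-01-29, so the TLS handshake to 127.0.0.1:9743 is rejected before the patch is ever applied. A minimal sketch of the validity comparison Go's TLS stack performs; the PEM path is a hypothetical placeholder, not a file from this cluster.

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/tmp/webhook-cert.pem") // hypothetical path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// The comparison behind "certificate has expired or is not yet valid":
	// the current time must fall inside [NotBefore, NotAfter].
	now := time.Now()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("invalid: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339),
			cert.NotAfter.UTC().Format(time.RFC3339))
		return
	}
	fmt.Println("certificate is within its validity window")
}
```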
Has your network provider started?"} Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.012491 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.012663 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: 
\"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.012746 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.012782 4861 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.012802 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.012860 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.012836904 +0000 UTC m=+148.684331571 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.012945 4861 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.013039 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.013062 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.013028679 +0000 UTC m=+148.684523266 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.012956 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.013108 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.013212 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.013235 4861 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.013286 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.013270236 +0000 UTC m=+148.684764823 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.013117 4861 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.013322 4861 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.013361 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.013350278 +0000 UTC m=+148.684844875 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.045213 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"
},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":
\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4731af291ecd1e5e9eed12ff3785d7f3022196ddfd2f5650c587bd32ba046ed\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:04Z\\\",\\\"message\\\":\\\":Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0129 06:36:04.169253 6534 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI0129 06:36:04.170863 6534 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI0129 06:36:04.170874 6534 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI0129 06:36:04.169323 6534 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nF0129 06:36:04.169439 6534 ovnkube.go:137] failed to run ovnkube: [failed to start network 
controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:32Z\\\",\\\"message\\\":\\\"r Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0129 06:36:32.117107 6936 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/machine-config-daemon-wkh9p\\\\nI0129 06:36:32.117115 6936 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-wkh9p\\\\nI0129 06:36:32.117120 6936 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-wkh9p in node crc\\\\nF0129 06:36:32.117123 6936 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z]\\\\nI0129 
06:36\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1
d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.063307 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168
.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.084749 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z"
Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.103993 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 11:19:30.828610723 +0000 UTC
Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.110218 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.110266 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.110277 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.110295 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.110308 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:33Z","lastTransitionTime":"2026-01-29T06:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.110321 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.217683 4861 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.217765 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.217792 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.217841 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.217867 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:33Z","lastTransitionTime":"2026-01-29T06:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.321042 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.321129 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.321148 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.321173 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.321192 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:33Z","lastTransitionTime":"2026-01-29T06:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.424607 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.424674 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.424698 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.424730 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.424748 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:33Z","lastTransitionTime":"2026-01-29T06:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.527906 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.527964 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.527987 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.528015 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.528040 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:33Z","lastTransitionTime":"2026-01-29T06:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.632794 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.632917 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.632944 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.632978 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.633000 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:33Z","lastTransitionTime":"2026-01-29T06:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.711914 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/3.log" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.717538 4861 scope.go:117] "RemoveContainer" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a" Jan 29 06:36:33 crc kubenswrapper[4861]: E0129 06:36:33.717887 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.735952 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.736012 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.736032 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.736056 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.736103 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:33Z","lastTransitionTime":"2026-01-29T06:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.741598 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.764317 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.784422 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.801434 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.824486 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:21Z\\\",\\\"message\\\":\\\"2026-01-29T06:35:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d\\\\n2026-01-29T06:35:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d to /host/opt/cni/bin/\\\\n2026-01-29T06:35:36Z [verbose] multus-daemon started\\\\n2026-01-29T06:35:36Z [verbose] Readiness Indicator file check\\\\n2026-01-29T06:36:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:36:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.839654 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.839747 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.839770 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.839802 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.839827 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:33Z","lastTransitionTime":"2026-01-29T06:36:33Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.846425 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.870159 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.904301 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:32Z\\\",\\\"message\\\":\\\"r Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0129 06:36:32.117107 6936 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/machine-config-daemon-wkh9p\\\\nI0129 06:36:32.117115 6936 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-wkh9p\\\\nI0129 06:36:32.117120 6936 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-wkh9p in node crc\\\\nF0129 06:36:32.117123 6936 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z]\\\\nI0129 06:36\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.925848 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.943761 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.943847 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.943867 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.943896 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.943915 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:33Z","lastTransitionTime":"2026-01-29T06:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.945453 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.966265 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:33 crc kubenswrapper[4861]: I0129 06:36:33.992263 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:33Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.019121 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.038644 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.046841 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.046897 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.046914 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.046937 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.046955 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:34Z","lastTransitionTime":"2026-01-29T06:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.046841 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.046897 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.046914 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.046937 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.046955 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:34Z","lastTransitionTime":"2026-01-29T06:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.060963 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:34Z is after 2025-08-24T17:21:41Z"
Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.078238 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.096135 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:34Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.104485 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 18:08:13.475392709 +0000 UTC Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.115898 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.115938 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.115978 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:36:34 crc kubenswrapper[4861]: E0129 06:36:34.116120 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.116143 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:36:34 crc kubenswrapper[4861]: E0129 06:36:34.116243 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:36:34 crc kubenswrapper[4861]: E0129 06:36:34.116338 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:36:34 crc kubenswrapper[4861]: E0129 06:36:34.116524 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
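Every "network is not ready" entry above reduces to the same condition: the runtime's CNI network is not configured because no network config exists in /etc/kubernetes/cni/net.d/. A rough Go approximation of that directory scan follows; it assumes the usual .conf/.conflist/.json extensions are what counts and is not the actual libcni code:

```go
// cnicheck.go - rough approximation (not libcni itself) of why the kubelet
// reports NetworkReady=false above: the CNI conf directory is scanned for
// network configs, and an empty result means "no CNI configuration file".
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	entries, err := os.ReadDir(confDir)
	if err != nil {
		log.Fatal(err)
	}
	var configs []string
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions CNI loaders accept
			configs = append(configs, e.Name())
		}
	}
	if len(configs) == 0 {
		fmt.Printf("no CNI configuration file in %s. Has your network provider started?\n", confDir)
		return
	}
	fmt.Println("CNI configs found:", configs)
}
```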
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.149481 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.149512 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.149522 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.149554 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.149565 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:34Z","lastTransitionTime":"2026-01-29T06:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.252319 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.252384 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.252401 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.252425 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.252443 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:34Z","lastTransitionTime":"2026-01-29T06:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.356322 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.356386 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.356403 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.356426 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.356443 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:34Z","lastTransitionTime":"2026-01-29T06:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.459178 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.459236 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.459249 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.459269 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.459550 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:34Z","lastTransitionTime":"2026-01-29T06:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.563612 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.563678 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.563736 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.563766 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.563821 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:34Z","lastTransitionTime":"2026-01-29T06:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.667600 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.667973 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.668005 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.668039 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.668065 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:34Z","lastTransitionTime":"2026-01-29T06:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.777360 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.777436 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.777543 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.777684 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.777733 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:34Z","lastTransitionTime":"2026-01-29T06:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.881010 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.881160 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.881180 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.881208 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.881228 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:34Z","lastTransitionTime":"2026-01-29T06:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.983846 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.983932 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.983952 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.983985 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:34 crc kubenswrapper[4861]: I0129 06:36:34.984009 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:34Z","lastTransitionTime":"2026-01-29T06:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.087470 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.087539 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.087560 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.087590 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.087612 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:35Z","lastTransitionTime":"2026-01-29T06:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.104919 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 21:39:29.107783169 +0000 UTC
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.190160 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.190237 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.190256 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.190288 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.190307 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:35Z","lastTransitionTime":"2026-01-29T06:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.294391 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.294434 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.294445 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.294463 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.294476 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:35Z","lastTransitionTime":"2026-01-29T06:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
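The certificate_manager entries in this stretch of the log report the same expiration (2026-02-24 05:53:03 UTC) but a different rotation deadline on every pass: client-go's certificate manager re-draws a jittered deadline, reportedly at roughly 70-90% of the certificate's lifetime, which matches the spread of deadlines seen here. An illustrative sketch follows, assuming a one-year validity window; it is not the vendored client-go code:

```go
// rotation.go - sketch of why the "rotation deadline" changes on every log
// line: a random point in the last 70-90% of the certificate's lifetime is
// picked each time rotation is evaluated (illustrative reimplementation).
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRotationDeadline picks a jittered point at 70-90% of the validity
// window, mirroring the spread of deadlines in the log above.
func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// NotAfter is taken from the log; NotBefore assumes a one-year
	// lifetime, which is consistent with the deadlines logged above.
	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
	notBefore := notAfter.AddDate(-1, 0, 0)
	for i := 0; i < 3; i++ {
		fmt.Println("rotation deadline:", nextRotationDeadline(notBefore, notAfter))
	}
}
```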
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.397940 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.398004 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.398029 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.398067 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.398125 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:35Z","lastTransitionTime":"2026-01-29T06:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.501364 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.501428 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.501453 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.501482 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.501502 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:35Z","lastTransitionTime":"2026-01-29T06:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.604965 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.605064 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.605111 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.605143 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.605163 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:35Z","lastTransitionTime":"2026-01-29T06:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.708585 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.708676 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.708708 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.708745 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.708770 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:35Z","lastTransitionTime":"2026-01-29T06:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.811970 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.812043 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.812065 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.812143 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.812165 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:35Z","lastTransitionTime":"2026-01-29T06:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.915649 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.915719 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.915737 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.915763 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:35 crc kubenswrapper[4861]: I0129 06:36:35.915781 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:35Z","lastTransitionTime":"2026-01-29T06:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.018595 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.018679 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.018703 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.018740 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.018767 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:36Z","lastTransitionTime":"2026-01-29T06:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.105978 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 23:13:50.035664112 +0000 UTC Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.116416 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.116492 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.116623 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:36 crc kubenswrapper[4861]: E0129 06:36:36.116782 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.116858 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:36 crc kubenswrapper[4861]: E0129 06:36:36.116898 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:36 crc kubenswrapper[4861]: E0129 06:36:36.117113 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:36 crc kubenswrapper[4861]: E0129 06:36:36.117414 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.122395 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.122451 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.122472 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.122493 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.122511 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:36Z","lastTransitionTime":"2026-01-29T06:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.225914 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.225986 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.226011 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.226042 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.226068 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:36Z","lastTransitionTime":"2026-01-29T06:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.328678 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.328764 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.328809 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.328867 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.328894 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:36Z","lastTransitionTime":"2026-01-29T06:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.431787 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.431842 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.431860 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.431885 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.431903 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:36Z","lastTransitionTime":"2026-01-29T06:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.535066 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.535155 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.535173 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.535199 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.535216 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:36Z","lastTransitionTime":"2026-01-29T06:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.650514 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.650579 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.650600 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.650626 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.650647 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:36Z","lastTransitionTime":"2026-01-29T06:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.754010 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.754067 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.754112 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.754137 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.754154 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:36Z","lastTransitionTime":"2026-01-29T06:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.857356 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.857441 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.857460 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.857486 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.857506 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:36Z","lastTransitionTime":"2026-01-29T06:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.961803 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.961863 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.961886 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.961916 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:36 crc kubenswrapper[4861]: I0129 06:36:36.961936 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:36Z","lastTransitionTime":"2026-01-29T06:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.065846 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.065926 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.065947 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.065980 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.066002 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.107040 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 05:13:35.289916027 +0000 UTC Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.169431 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.170176 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.170336 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.170487 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.170661 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.275654 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.275732 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.275749 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.275784 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.275799 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.381109 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.381191 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.381205 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.381227 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.381255 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.484829 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.484894 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.484912 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.484937 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.484956 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.588842 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.588939 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.588960 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.589018 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.589038 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.692347 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.692407 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.692426 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.692451 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.692471 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.769005 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.769156 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.769182 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.769213 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.769235 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: E0129 06:36:37.793033 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.799695 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.799776 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.799794 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.800310 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.800332 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: E0129 06:36:37.858164 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.862987 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.863109 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.863132 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.863159 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.863209 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: E0129 06:36:37.884175 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.888462 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.888549 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.888568 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.888622 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.888640 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: E0129 06:36:37.909667 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.913921 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.913953 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.913963 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.913978 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.913987 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:37 crc kubenswrapper[4861]: E0129 06:36:37.932457 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:37Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:37 crc kubenswrapper[4861]: E0129 06:36:37.932731 4861 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.935949 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.936009 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.936033 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.936062 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:37 crc kubenswrapper[4861]: I0129 06:36:37.936162 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:37Z","lastTransitionTime":"2026-01-29T06:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.039740 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.039792 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.039809 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.039831 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.039849 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:38Z","lastTransitionTime":"2026-01-29T06:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.107709 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 22:44:09.123022323 +0000 UTC Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.116282 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.116363 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.116412 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:38 crc kubenswrapper[4861]: E0129 06:36:38.116629 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.116673 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:38 crc kubenswrapper[4861]: E0129 06:36:38.116808 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:38 crc kubenswrapper[4861]: E0129 06:36:38.116926 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:38 crc kubenswrapper[4861]: E0129 06:36:38.117051 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.142506 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.142552 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.142569 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.142591 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.142645 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:38Z","lastTransitionTime":"2026-01-29T06:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.244862 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.244918 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.244934 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.244956 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.244974 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:38Z","lastTransitionTime":"2026-01-29T06:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.347950 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.348009 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.348026 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.348051 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.348105 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:38Z","lastTransitionTime":"2026-01-29T06:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.451330 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.451382 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.451404 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.451428 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.451444 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:38Z","lastTransitionTime":"2026-01-29T06:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.554632 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.554705 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.554729 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.554761 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.554782 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:38Z","lastTransitionTime":"2026-01-29T06:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.658439 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.658495 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.658507 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.658527 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.658540 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:38Z","lastTransitionTime":"2026-01-29T06:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.761924 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.761978 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.761998 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.762024 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.762043 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:38Z","lastTransitionTime":"2026-01-29T06:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.866717 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.866764 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.866780 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.866803 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.866820 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:38Z","lastTransitionTime":"2026-01-29T06:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.977423 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.977526 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.977548 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.977614 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:38 crc kubenswrapper[4861]: I0129 06:36:38.977635 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:38Z","lastTransitionTime":"2026-01-29T06:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.081678 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.081779 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.081807 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.081848 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.081874 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:39Z","lastTransitionTime":"2026-01-29T06:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.108439 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 01:54:35.428270285 +0000 UTC Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.139290 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.167688 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.184774 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.184894 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:39 crc 
kubenswrapper[4861]: I0129 06:36:39.184912 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.184943 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.184966 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:39Z","lastTransitionTime":"2026-01-29T06:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.190758 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 
cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.210463 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.234177 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 
2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.254619 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.272656 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.288127 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.288185 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.288202 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.288227 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.288244 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:39Z","lastTransitionTime":"2026-01-29T06:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.292777 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.311543 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers 
with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.330005 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.345650 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.364948 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:21Z\\\",\\\"message\\\":\\\"2026-01-29T06:35:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d\\\\n2026-01-29T06:35:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d to /host/opt/cni/bin/\\\\n2026-01-29T06:35:36Z [verbose] multus-daemon started\\\\n2026-01-29T06:35:36Z [verbose] Readiness Indicator file check\\\\n2026-01-29T06:36:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:36:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.379984 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.391393 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.391457 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.391478 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.391506 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.391535 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:39Z","lastTransitionTime":"2026-01-29T06:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.400706 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.428832 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:32Z\\\",\\\"message\\\":\\\"r Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0129 06:36:32.117107 6936 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/machine-config-daemon-wkh9p\\\\nI0129 06:36:32.117115 6936 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-wkh9p\\\\nI0129 06:36:32.117120 6936 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-wkh9p in node crc\\\\nF0129 06:36:32.117123 6936 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z]\\\\nI0129 06:36\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.447376 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.465857 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:39Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.494737 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.494807 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.494825 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.494853 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.494875 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:39Z","lastTransitionTime":"2026-01-29T06:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.597845 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.597916 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.597936 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.597965 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:39 crc kubenswrapper[4861]: I0129 06:36:39.597985 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:39Z","lastTransitionTime":"2026-01-29T06:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 29 06:36:40 crc kubenswrapper[4861]: I0129 06:36:40.108845 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 12:36:50.096766208 +0000 UTC
Jan 29 06:36:40 crc kubenswrapper[4861]: I0129 06:36:40.115477 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:36:40 crc kubenswrapper[4861]: E0129 06:36:40.115639 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:36:40 crc kubenswrapper[4861]: I0129 06:36:40.115654 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:36:40 crc kubenswrapper[4861]: I0129 06:36:40.115677 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:36:40 crc kubenswrapper[4861]: I0129 06:36:40.115718 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:36:40 crc kubenswrapper[4861]: E0129 06:36:40.116347 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:36:40 crc kubenswrapper[4861]: E0129 06:36:40.116458 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:36:40 crc kubenswrapper[4861]: E0129 06:36:40.116531 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:36:41 crc kubenswrapper[4861]: I0129 06:36:41.109767 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 00:04:19.573084235 +0000 UTC
Jan 29 06:36:42 crc kubenswrapper[4861]: I0129 06:36:42.109885 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 09:38:22.026833243 +0000 UTC
Jan 29 06:36:42 crc kubenswrapper[4861]: I0129 06:36:42.116202 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:36:42 crc kubenswrapper[4861]: E0129 06:36:42.116395 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:36:42 crc kubenswrapper[4861]: I0129 06:36:42.116477 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:36:42 crc kubenswrapper[4861]: I0129 06:36:42.116490 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:36:42 crc kubenswrapper[4861]: I0129 06:36:42.116863 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:36:42 crc kubenswrapper[4861]: E0129 06:36:42.117111 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:36:42 crc kubenswrapper[4861]: E0129 06:36:42.117293 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:36:42 crc kubenswrapper[4861]: E0129 06:36:42.117402 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:36:42 crc kubenswrapper[4861]: I0129 06:36:42.134167 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Jan 29 06:36:43 crc kubenswrapper[4861]: I0129 06:36:43.110895 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 15:07:31.685308688 +0000 UTC
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.112035 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 23:20:22.241077779 +0000 UTC
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.116350 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:36:44 crc kubenswrapper[4861]: E0129 06:36:44.116501 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.116524 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.116543 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.116531 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:36:44 crc kubenswrapper[4861]: E0129 06:36:44.116893 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:36:44 crc kubenswrapper[4861]: E0129 06:36:44.117173 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:36:44 crc kubenswrapper[4861]: E0129 06:36:44.117259 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.153339 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.153389 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.153405 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.153430 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.153448 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:44Z","lastTransitionTime":"2026-01-29T06:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.255460 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.255543 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.255564 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.255592 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.255616 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:44Z","lastTransitionTime":"2026-01-29T06:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.358632 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.358722 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.358754 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.358791 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.358815 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:44Z","lastTransitionTime":"2026-01-29T06:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.463290 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.463363 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.463382 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.463412 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.463431 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:44Z","lastTransitionTime":"2026-01-29T06:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.566548 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.566626 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.566643 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.566674 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.566696 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:44Z","lastTransitionTime":"2026-01-29T06:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.669473 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.669546 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.669591 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.669627 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.669649 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:44Z","lastTransitionTime":"2026-01-29T06:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.774178 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.774306 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.774327 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.774353 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.774371 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:44Z","lastTransitionTime":"2026-01-29T06:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.877171 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.877249 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.877262 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.877279 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.877294 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:44Z","lastTransitionTime":"2026-01-29T06:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.980668 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.980748 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.980769 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.980800 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:44 crc kubenswrapper[4861]: I0129 06:36:44.980825 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:44Z","lastTransitionTime":"2026-01-29T06:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.084044 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.084137 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.084155 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.084180 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.084198 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:45Z","lastTransitionTime":"2026-01-29T06:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.112812 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 15:12:57.601355102 +0000 UTC
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.187952 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.188004 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.188019 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.188038 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.188051 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:45Z","lastTransitionTime":"2026-01-29T06:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.290395 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.290454 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.290473 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.290497 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.290515 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:45Z","lastTransitionTime":"2026-01-29T06:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.393324 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.393384 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.393403 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.393425 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.393443 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:45Z","lastTransitionTime":"2026-01-29T06:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.496840 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.496888 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.496909 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.496932 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.496950 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:45Z","lastTransitionTime":"2026-01-29T06:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.600318 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.600383 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.600400 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.600427 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.600444 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:45Z","lastTransitionTime":"2026-01-29T06:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.704410 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.704468 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.704484 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.704507 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.704528 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:45Z","lastTransitionTime":"2026-01-29T06:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.806889 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.806957 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.806969 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.806987 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.807000 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:45Z","lastTransitionTime":"2026-01-29T06:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.910797 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.910869 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.910889 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.910917 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:45 crc kubenswrapper[4861]: I0129 06:36:45.910935 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:45Z","lastTransitionTime":"2026-01-29T06:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.014151 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.014218 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.014235 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.014258 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.014277 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:46Z","lastTransitionTime":"2026-01-29T06:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.113905 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 21:12:53.45690567 +0000 UTC
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.115492 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.115560 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.115574 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.115679 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:36:46 crc kubenswrapper[4861]: E0129 06:36:46.115773 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:36:46 crc kubenswrapper[4861]: E0129 06:36:46.115854 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:36:46 crc kubenswrapper[4861]: E0129 06:36:46.116560 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:36:46 crc kubenswrapper[4861]: E0129 06:36:46.116667 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.116837 4861 scope.go:117] "RemoveContainer" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"
Jan 29 06:36:46 crc kubenswrapper[4861]: E0129 06:36:46.117062 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.118169 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.118204 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.118212 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.118228 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.118237 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:46Z","lastTransitionTime":"2026-01-29T06:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.221575 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.221652 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.221676 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.221710 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.221735 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:46Z","lastTransitionTime":"2026-01-29T06:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.324829 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.324895 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.324913 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.324939 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.324962 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:46Z","lastTransitionTime":"2026-01-29T06:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.428956 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.429028 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.429050 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.429109 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.429128 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:46Z","lastTransitionTime":"2026-01-29T06:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.532528 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.532595 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.532612 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.532643 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.532665 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:46Z","lastTransitionTime":"2026-01-29T06:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.635731 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.635804 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.635827 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.635886 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.635909 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:46Z","lastTransitionTime":"2026-01-29T06:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.740624 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.740701 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.740721 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.740751 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.740770 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:46Z","lastTransitionTime":"2026-01-29T06:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.844643 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.844709 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.844727 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.844753 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.844780 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:46Z","lastTransitionTime":"2026-01-29T06:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.948630 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.948707 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.948731 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.948760 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:46 crc kubenswrapper[4861]: I0129 06:36:46.948784 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:46Z","lastTransitionTime":"2026-01-29T06:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.051409 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.051516 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.051535 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.051559 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.051578 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:47Z","lastTransitionTime":"2026-01-29T06:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.114194 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 20:43:54.480944286 +0000 UTC
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.154223 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.154293 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.154310 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.154335 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.154352 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:47Z","lastTransitionTime":"2026-01-29T06:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.257529 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.257606 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.257625 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.257651 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.257670 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:47Z","lastTransitionTime":"2026-01-29T06:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.361867 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.361938 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.361964 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.361997 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.362015 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:47Z","lastTransitionTime":"2026-01-29T06:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.464671 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.464753 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.464777 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.464806 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.464826 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:47Z","lastTransitionTime":"2026-01-29T06:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.568039 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.568135 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.568153 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.568179 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.568195 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:47Z","lastTransitionTime":"2026-01-29T06:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.670994 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.671050 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.671066 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.671130 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.671148 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:47Z","lastTransitionTime":"2026-01-29T06:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.774466 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.774548 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.774567 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.774595 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.774617 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:47Z","lastTransitionTime":"2026-01-29T06:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.878642 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.878692 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.878708 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.878730 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.878748 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:47Z","lastTransitionTime":"2026-01-29T06:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.982062 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.982186 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.982210 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.982245 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:47 crc kubenswrapper[4861]: I0129 06:36:47.982270 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:47Z","lastTransitionTime":"2026-01-29T06:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.085328 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.085470 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.085492 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.085524 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.085546 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.115052 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 22:06:48.136362357 +0000 UTC
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.116360 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.116443 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.116495 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.116448 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:36:48 crc kubenswrapper[4861]: E0129 06:36:48.116667 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:36:48 crc kubenswrapper[4861]: E0129 06:36:48.116846 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:36:48 crc kubenswrapper[4861]: E0129 06:36:48.116972 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:36:48 crc kubenswrapper[4861]: E0129 06:36:48.117158 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.188920 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.188999 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.189020 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.189049 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.189069 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.217777 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.217860 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.217885 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.217916 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.217934 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:48 crc kubenswrapper[4861]: E0129 06:36:48.241563 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.247276 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.247351 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.247371 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.247404 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.247422 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:48 crc kubenswrapper[4861]: E0129 06:36:48.265743 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.271864 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.271951 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.271979 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.272016 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.272045 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:48 crc kubenswrapper[4861]: E0129 06:36:48.294312 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.299714 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.299767 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.299783 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.299806 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.299820 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:48 crc kubenswrapper[4861]: E0129 06:36:48.320657 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.325611 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.325685 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.325703 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.325730 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.325748 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:48 crc kubenswrapper[4861]: E0129 06:36:48.346739 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:48Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:48 crc kubenswrapper[4861]: E0129 06:36:48.346973 4861 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.349128 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.349172 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.349190 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.349215 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.349232 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.452122 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.452194 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.452207 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.452223 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.452234 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.555413 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.555504 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.555522 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.555544 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.555561 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.658530 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.658570 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.658581 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.658598 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.658609 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.760892 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.760962 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.760982 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.761010 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.761032 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.864320 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.864396 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.864422 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.864457 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.864479 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.967642 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.967710 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.967728 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.967754 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:48 crc kubenswrapper[4861]: I0129 06:36:48.967774 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:48Z","lastTransitionTime":"2026-01-29T06:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.071277 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.071345 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.071363 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.071389 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.071407 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:49Z","lastTransitionTime":"2026-01-29T06:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
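
Every NodeNotReady record in the stretch above reduces to the same root cause: the kubelet's runtime network check finds no CNI configuration file under /etc/kubernetes/cni/net.d/. The Go sketch below is a minimal standalone diagnostic that mirrors that check; it is not kubelet source, only the directory path is taken from the log, and the candidate extensions (.conf, .conflist, .json) are an assumption based on common CNI loader conventions.

// cnicheck.go - minimal sketch: report whether the CNI config directory
// named in the log holds any candidate network configuration.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // path quoted in the kubelet message above
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", dir, err)
		os.Exit(1)
	}
	var configs []string
	for _, e := range entries {
		// Assumed candidate extensions; common CNI convention, not verified here.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			configs = append(configs, e.Name())
		}
	}
	if len(configs) == 0 {
		fmt.Println("no CNI configuration file found: network plugin not ready")
		os.Exit(1)
	}
	fmt.Printf("found CNI configuration: %v\n", configs)
}

An empty result here is exactly the condition kubelet keeps reporting: the network provider (OVN-Kubernetes in this cluster) never writes its config because, as the ovnkube-node records further down show, ovnkube-controller is crash-looping on the expired webhook certificate.
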
Has your network provider started?"} Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.115755 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 02:25:29.81742343 +0000 UTC Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.133682 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ab4064a4-3eb6-40a4-8265-659277a0c89c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44f4b84198cb878a248b3daa7c766de056644c4308280a03f6ebb1d9221358d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b21d63e1683dcdd8f31ab08fb5ece2680c36d64e40f339a74e614146ceb297b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b21d63e1683dcdd8f31ab08fb5ece2680c36d64e40f339a74e614146ceb297b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.152367 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.173764 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.176302 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.176349 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.176367 4861 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.176393 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.176413 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:49Z","lastTransitionTime":"2026-01-29T06:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.206397 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f52dd37552ad663a684a6c08934bbd75587aaf7
a5d34ea4d2cbeeb415bd3f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:32Z\\\",\\\"message\\\":\\\"r Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0129 06:36:32.117107 6936 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/machine-config-daemon-wkh9p\\\\nI0129 06:36:32.117115 6936 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-wkh9p\\\\nI0129 06:36:32.117120 6936 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-wkh9p in node crc\\\\nF0129 06:36:32.117123 6936 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z]\\\\nI0129 06:36\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
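
The ovnkube-controller status above shows the dependency loop closing on itself: the container exits because it cannot set node annotations through the expired webhook, and kubelet restarts it under CrashLoopBackOff. The "back-off 40s" at restartCount 3 is consistent with kubelet's crash-loop schedule, which doubles the delay per restart; the sketch below reproduces that schedule under the assumption of the commonly documented defaults (10s initial delay, 5m cap), which were not read from this cluster's configuration.

// backoff.go - crash-loop back-off schedule sketch: delay doubles per
// restart up to a cap. With an assumed 10s base, restart 3 lands on the
// 40s figure seen in the log (10s, 20s, 40s, ...).
package main

import (
	"fmt"
	"time"
)

func main() {
	delay, maxDelay := 10*time.Second, 5*time.Minute // assumed kubelet defaults
	for restart := 1; restart <= 6; restart++ {
		fmt.Printf("restart %d: back-off %s\n", restart, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
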
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.224661 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.248762 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.270618 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.279578 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.279650 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.279708 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.279743 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.279770 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:49Z","lastTransitionTime":"2026-01-29T06:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
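
The error text carries both timestamps needed to size the outage: the node clock reads 2026-01-29T06:36:49Z while the webhook certificate's notAfter is 2025-08-24T17:21:41Z, so the certificate has been expired for roughly five months, far beyond anything in-cluster rotation would recover on its own. Parsing the two RFC 3339 values, copied verbatim from the log, gives the exact gap:

// certgap.go - compute how long the webhook certificate has been expired,
// using the two timestamps quoted in the kubelet error message above.
package main

import (
	"fmt"
	"log"
	"time"
)

func main() {
	now, err := time.Parse(time.RFC3339, "2026-01-29T06:36:49Z")
	if err != nil {
		log.Fatal(err)
	}
	notAfter, err := time.Parse(time.RFC3339, "2025-08-24T17:21:41Z")
	if err != nil {
		log.Fatal(err)
	}
	gap := now.Sub(notAfter)
	fmt.Printf("certificate expired %s (~%.0f days) before the current time\n", gap, gap.Hours()/24)
}
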
Has your network provider started?"} Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.294265 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.315552 4861 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\
\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.333671 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.351232 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 
2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.369777 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.382662 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.382727 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.382756 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.382792 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.382812 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:49Z","lastTransitionTime":"2026-01-29T06:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.385048 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.407303 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.427920 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.448189 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.467730 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.486295 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.486369 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.486393 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.486420 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.486438 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:49Z","lastTransitionTime":"2026-01-29T06:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.486908 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:21Z\\\",\\\"message\\\":\\\"2026-01-29T06:35:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d\\\\n2026-01-29T06:35:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d to /host/opt/cni/bin/\\\\n2026-01-29T06:35:36Z [verbose] multus-daemon started\\\\n2026-01-29T06:35:36Z [verbose] Readiness Indicator file check\\\\n2026-01-29T06:36:21Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:36:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:49Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.589852 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.589949 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.589973 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.590013 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.590037 4861 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:49Z","lastTransitionTime":"2026-01-29T06:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.693976 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.694058 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.694119 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.694157 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.694179 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:49Z","lastTransitionTime":"2026-01-29T06:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.798146 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.798219 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.798237 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.798260 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.798276 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:49Z","lastTransitionTime":"2026-01-29T06:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.903517 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.903591 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.903617 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.903662 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.903685 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:49Z","lastTransitionTime":"2026-01-29T06:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:49 crc kubenswrapper[4861]: I0129 06:36:49.925280 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:49 crc kubenswrapper[4861]: E0129 06:36:49.925440 4861 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:36:49 crc kubenswrapper[4861]: E0129 06:36:49.925540 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs podName:fb22f8f6-1210-4f39-8712-d33efc26239c nodeName:}" failed. No retries permitted until 2026-01-29 06:37:53.925515436 +0000 UTC m=+165.597010023 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs") pod "network-metrics-daemon-rh69l" (UID: "fb22f8f6-1210-4f39-8712-d33efc26239c") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.006935 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.006986 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.007002 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.007025 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.007043 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:50Z","lastTransitionTime":"2026-01-29T06:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.110942 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.111008 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.111024 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.111048 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.111065 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:50Z","lastTransitionTime":"2026-01-29T06:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.116147 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 12:56:48.333906055 +0000 UTC Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.116333 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.116388 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.116389 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.116333 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:50 crc kubenswrapper[4861]: E0129 06:36:50.116482 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:50 crc kubenswrapper[4861]: E0129 06:36:50.116632 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:50 crc kubenswrapper[4861]: E0129 06:36:50.116829 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:50 crc kubenswrapper[4861]: E0129 06:36:50.116892 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.214145 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.214214 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.214233 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.214257 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.214275 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:50Z","lastTransitionTime":"2026-01-29T06:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.317220 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.317280 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.317301 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.317324 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.317343 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:50Z","lastTransitionTime":"2026-01-29T06:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.420405 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.420464 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.420482 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.420509 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.420527 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:50Z","lastTransitionTime":"2026-01-29T06:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.523516 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.523562 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.523607 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.523637 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.523656 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:50Z","lastTransitionTime":"2026-01-29T06:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.626413 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.626461 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.626473 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.626491 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.626506 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:50Z","lastTransitionTime":"2026-01-29T06:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.729854 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.729902 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.729919 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.729946 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.729963 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:50Z","lastTransitionTime":"2026-01-29T06:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.832840 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.833017 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.833038 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.833065 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.833116 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:50Z","lastTransitionTime":"2026-01-29T06:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.936192 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.936249 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.936269 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.936295 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:50 crc kubenswrapper[4861]: I0129 06:36:50.936315 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:50Z","lastTransitionTime":"2026-01-29T06:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.038856 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.038907 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.038926 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.038948 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.038965 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:51Z","lastTransitionTime":"2026-01-29T06:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.116695 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 07:35:48.524248249 +0000 UTC Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.141542 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.141586 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.141596 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.141613 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.141626 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:51Z","lastTransitionTime":"2026-01-29T06:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.244735 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.244795 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.244814 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.244835 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.244851 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:51Z","lastTransitionTime":"2026-01-29T06:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.347644 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.347693 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.347707 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.347725 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.347737 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:51Z","lastTransitionTime":"2026-01-29T06:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.451366 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.451421 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.451443 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.451474 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.451495 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:51Z","lastTransitionTime":"2026-01-29T06:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.553829 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.553898 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.553979 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.554020 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.554045 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:51Z","lastTransitionTime":"2026-01-29T06:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.657150 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.657193 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.657203 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.657220 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.657232 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:51Z","lastTransitionTime":"2026-01-29T06:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.783579 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.783619 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.783633 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.783659 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.783671 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:51Z","lastTransitionTime":"2026-01-29T06:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.886744 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.886784 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.886795 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.886812 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.886827 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:51Z","lastTransitionTime":"2026-01-29T06:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.989639 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.989705 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.989722 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.989772 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:51 crc kubenswrapper[4861]: I0129 06:36:51.989794 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:51Z","lastTransitionTime":"2026-01-29T06:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.092575 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.092655 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.092683 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.092711 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.092732 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:52Z","lastTransitionTime":"2026-01-29T06:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.115766 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.115813 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.115895 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:52 crc kubenswrapper[4861]: E0129 06:36:52.115918 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.115790 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:52 crc kubenswrapper[4861]: E0129 06:36:52.116115 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:52 crc kubenswrapper[4861]: E0129 06:36:52.116256 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:52 crc kubenswrapper[4861]: E0129 06:36:52.116424 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.116980 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 22:21:02.603641715 +0000 UTC Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.197801 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.197854 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.197872 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.197895 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.197913 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:52Z","lastTransitionTime":"2026-01-29T06:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.301386 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.301442 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.301459 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.301483 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.301501 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:52Z","lastTransitionTime":"2026-01-29T06:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.404254 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.404319 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.404368 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.404393 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.404411 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:52Z","lastTransitionTime":"2026-01-29T06:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.507443 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.507491 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.507508 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.507533 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.508152 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:52Z","lastTransitionTime":"2026-01-29T06:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.611533 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.611764 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.611916 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.612065 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.612574 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:52Z","lastTransitionTime":"2026-01-29T06:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.716443 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.716534 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.716560 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.716592 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.716610 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:52Z","lastTransitionTime":"2026-01-29T06:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.820127 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.820237 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.820255 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.820279 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.820296 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:52Z","lastTransitionTime":"2026-01-29T06:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.923969 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.924036 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.924054 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.924105 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:52 crc kubenswrapper[4861]: I0129 06:36:52.924124 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:52Z","lastTransitionTime":"2026-01-29T06:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.026644 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.026718 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.026736 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.026761 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.026782 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:53Z","lastTransitionTime":"2026-01-29T06:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.117109 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 14:42:43.950066803 +0000 UTC Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.128695 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.128740 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.128753 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.128769 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.128781 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:53Z","lastTransitionTime":"2026-01-29T06:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.137625 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.232205 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.232246 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.232257 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.232275 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.232287 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:53Z","lastTransitionTime":"2026-01-29T06:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.334564 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.334609 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.334625 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.334651 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.334670 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:53Z","lastTransitionTime":"2026-01-29T06:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.437776 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.437858 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.437880 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.437947 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.437974 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:53Z","lastTransitionTime":"2026-01-29T06:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.541529 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.541588 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.541607 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.541630 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.541650 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:53Z","lastTransitionTime":"2026-01-29T06:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.644122 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.644184 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.644202 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.644228 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.644246 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:53Z","lastTransitionTime":"2026-01-29T06:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.748010 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.748109 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.748130 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.748153 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.748171 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:53Z","lastTransitionTime":"2026-01-29T06:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.850519 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.850577 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.850595 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.850621 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.850638 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:53Z","lastTransitionTime":"2026-01-29T06:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.953482 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.953529 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.953541 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.953559 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:53 crc kubenswrapper[4861]: I0129 06:36:53.953571 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:53Z","lastTransitionTime":"2026-01-29T06:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.056618 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.056663 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.056677 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.056697 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.056710 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:54Z","lastTransitionTime":"2026-01-29T06:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.115791 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.115936 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:54 crc kubenswrapper[4861]: E0129 06:36:54.116164 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.116251 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.116346 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:54 crc kubenswrapper[4861]: E0129 06:36:54.116493 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:54 crc kubenswrapper[4861]: E0129 06:36:54.116840 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:54 crc kubenswrapper[4861]: E0129 06:36:54.117123 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.117335 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 21:38:34.390443883 +0000 UTC Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.159811 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.159879 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.159902 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.159932 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.159954 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:54Z","lastTransitionTime":"2026-01-29T06:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.262512 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.262590 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.262610 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.262636 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.262676 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:54Z","lastTransitionTime":"2026-01-29T06:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.364728 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.364800 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.364823 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.364850 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.364875 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:54Z","lastTransitionTime":"2026-01-29T06:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.467747 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.467803 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.467821 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.467840 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.467853 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:54Z","lastTransitionTime":"2026-01-29T06:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.569975 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.570050 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.570111 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.570147 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.570171 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:54Z","lastTransitionTime":"2026-01-29T06:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.673224 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.673290 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.673309 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.673336 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.673358 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:54Z","lastTransitionTime":"2026-01-29T06:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.776138 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.776197 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.776215 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.776239 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.776257 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:54Z","lastTransitionTime":"2026-01-29T06:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.879424 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.879492 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.879512 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.879537 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.879564 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:54Z","lastTransitionTime":"2026-01-29T06:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.982464 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.982634 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.982662 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.982692 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:54 crc kubenswrapper[4861]: I0129 06:36:54.982712 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:54Z","lastTransitionTime":"2026-01-29T06:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.085385 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.085455 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.085474 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.085523 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.085549 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:55Z","lastTransitionTime":"2026-01-29T06:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.118030 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 08:05:53.05663744 +0000 UTC Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.188620 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.188650 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.188663 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.188679 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.188692 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:55Z","lastTransitionTime":"2026-01-29T06:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.291461 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.291561 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.291620 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.291645 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.291710 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:55Z","lastTransitionTime":"2026-01-29T06:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.394440 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.394492 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.394508 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.394532 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.394551 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:55Z","lastTransitionTime":"2026-01-29T06:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.497664 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.497724 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.497740 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.497763 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.497784 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:55Z","lastTransitionTime":"2026-01-29T06:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.601128 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.601194 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.601212 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.601237 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.601255 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:55Z","lastTransitionTime":"2026-01-29T06:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.704406 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.704483 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.704507 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.704562 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.704582 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:55Z","lastTransitionTime":"2026-01-29T06:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.806676 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.806737 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.806756 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.806782 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.806799 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:55Z","lastTransitionTime":"2026-01-29T06:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.910982 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.911497 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.911515 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.911539 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:55 crc kubenswrapper[4861]: I0129 06:36:55.911558 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:55Z","lastTransitionTime":"2026-01-29T06:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.019833 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.019930 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.019948 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.020025 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.020056 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:56Z","lastTransitionTime":"2026-01-29T06:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.116300 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.116364 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.116310 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:56 crc kubenswrapper[4861]: E0129 06:36:56.116493 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:56 crc kubenswrapper[4861]: E0129 06:36:56.116664 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:56 crc kubenswrapper[4861]: E0129 06:36:56.116790 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.116977 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:56 crc kubenswrapper[4861]: E0129 06:36:56.117117 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.119158 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 19:34:32.162942836 +0000 UTC Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.123112 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.123165 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.123183 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.123208 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.123226 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:56Z","lastTransitionTime":"2026-01-29T06:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.226024 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.226136 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.226164 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.226195 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.226218 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:56Z","lastTransitionTime":"2026-01-29T06:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.328560 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.328601 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.328611 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.328623 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.328632 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:56Z","lastTransitionTime":"2026-01-29T06:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.430946 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.431495 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.431544 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.431569 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.431587 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:56Z","lastTransitionTime":"2026-01-29T06:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.534125 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.534173 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.534190 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.534425 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.534459 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:56Z","lastTransitionTime":"2026-01-29T06:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.637684 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.637747 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.637771 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.637802 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.637825 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:56Z","lastTransitionTime":"2026-01-29T06:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.741654 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.741696 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.741707 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.741722 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.741733 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:56Z","lastTransitionTime":"2026-01-29T06:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.844728 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.844807 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.844833 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.844865 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.844890 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:56Z","lastTransitionTime":"2026-01-29T06:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.947960 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.948043 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.948068 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.948124 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:56 crc kubenswrapper[4861]: I0129 06:36:56.948153 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:56Z","lastTransitionTime":"2026-01-29T06:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.051420 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.051496 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.051522 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.051554 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.051579 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:57Z","lastTransitionTime":"2026-01-29T06:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.119370 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 15:31:06.940291806 +0000 UTC Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.153996 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.154049 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.154131 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.154165 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.154186 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:57Z","lastTransitionTime":"2026-01-29T06:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.257455 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.257536 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.257560 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.257594 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.257620 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:57Z","lastTransitionTime":"2026-01-29T06:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.361436 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.361496 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.361515 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.361542 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.361561 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:57Z","lastTransitionTime":"2026-01-29T06:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.464437 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.464500 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.464517 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.464542 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.464559 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:57Z","lastTransitionTime":"2026-01-29T06:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.567622 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.567684 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.567703 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.567734 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.567752 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:57Z","lastTransitionTime":"2026-01-29T06:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.671374 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.671474 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.671494 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.671519 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.671537 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:57Z","lastTransitionTime":"2026-01-29T06:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.774968 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.775128 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.775149 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.775174 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.775238 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:57Z","lastTransitionTime":"2026-01-29T06:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.878369 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.878431 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.878452 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.878529 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.878553 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:57Z","lastTransitionTime":"2026-01-29T06:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.981528 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.981583 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.981600 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.981622 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:57 crc kubenswrapper[4861]: I0129 06:36:57.981638 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:57Z","lastTransitionTime":"2026-01-29T06:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.085059 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.085220 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.085242 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.085267 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.085286 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.115775 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.115908 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.115782 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:36:58 crc kubenswrapper[4861]: E0129 06:36:58.115954 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.115810 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:36:58 crc kubenswrapper[4861]: E0129 06:36:58.116145 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:36:58 crc kubenswrapper[4861]: E0129 06:36:58.116247 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:36:58 crc kubenswrapper[4861]: E0129 06:36:58.116332 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.120037 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 10:49:50.629597334 +0000 UTC Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.187744 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.187798 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.187816 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.187844 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.187862 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.290254 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.290322 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.290344 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.290376 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.290398 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.391510 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.391584 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.391601 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.391625 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.391644 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: E0129 06:36:58.412672 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:58Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.417606 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.417656 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.417673 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.417696 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.417712 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: E0129 06:36:58.437211 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:58Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.442459 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.442507 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.442524 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.442545 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.442563 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: E0129 06:36:58.462932 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:58Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.468025 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.468115 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.468140 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.468169 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.468191 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: E0129 06:36:58.486826 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:58Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.491852 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.491911 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.491931 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.491958 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.491978 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: E0129 06:36:58.510566 4861 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"dad46bc6-8766-4734-bc4e-ff0764d2ff72\\\",\\\"systemUUID\\\":\\\"4242a32a-6d38-415c-93a3-943ed93797ae\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:58Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:58 crc kubenswrapper[4861]: E0129 06:36:58.510851 4861 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.512951 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.512997 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.513014 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.513034 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.513051 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.614951 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.614998 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.615008 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.615021 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.615030 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.717203 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.717237 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.717245 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.717258 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.717266 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.819171 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.819445 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.819480 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.819514 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.819536 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.921642 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.921724 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.921733 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.921747 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:58 crc kubenswrapper[4861]: I0129 06:36:58.921757 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:58Z","lastTransitionTime":"2026-01-29T06:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.024998 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.025054 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.025069 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.025112 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.025125 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:59Z","lastTransitionTime":"2026-01-29T06:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.120698 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 20:20:59.227969479 +0000 UTC Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.127649 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.127684 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.127693 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.127704 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.127714 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:59Z","lastTransitionTime":"2026-01-29T06:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.138209 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.155903 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.169388 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2kbk8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff129b34-bccd-4a2c-b1c2-75a8a78e1715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db9a733cd163dd80cc3751793263bae97a26d8d7b53af0109bafed08f515a6c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9kvjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2kbk8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.189466 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4942p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da8019d1-2d2c-493d-b80f-1d566eec9475\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:21Z\\\",\\\"message\\\":\\\"2026-01-29T06:35:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d\\\\n2026-01-29T06:35:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_8d05d987-66fe-4094-a6ae-a25c1b875a2d to /host/opt/cni/bin/\\\\n2026-01-29T06:35:36Z [verbose] multus-daemon started\\\\n2026-01-29T06:35:36Z [verbose] Readiness Indicator file check\\\\n2026-01-29T06:36:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:36:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tn628\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4942p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.207554 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d6e74b31-2c38-44dd-86e4-6c3d0eac6a8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://110212c81f7c861d011772f67df52b9bff9b57e0b7126cb9bcbac82923318d81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://add8c38b5092428d7543145573a9888e1344238ddb062abc0d87606f08e32cdc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://486505d1a302cee951468afeb4ca4b241890b6c00afd0c1d83e6588831385750\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.226392 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c26c98bead4492936d6467c282c3b10ac2f023c92cc24c646d5514dd28623e56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z"
Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.229876 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.229920 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.229933 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.229953 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.229966 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:59Z","lastTransitionTime":"2026-01-29T06:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.252573 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a1daa19f0a2c504b3862a72e4130ab3d87411bc2f71a4555a5a437235ec2b77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9abb8eac8c903b22ddcc57d4cc63efa29332aa3072b4428d572622bc5fee0bc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name
\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.287144 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6ece014-5432-4877-9449-4253d6124c73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f52dd37552ad663a684a6c08934bbd75587aaf7
a5d34ea4d2cbeeb415bd3f6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T06:36:32Z\\\",\\\"message\\\":\\\"r Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0129 06:36:32.117107 6936 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/machine-config-daemon-wkh9p\\\\nI0129 06:36:32.117115 6936 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-wkh9p\\\\nI0129 06:36:32.117120 6936 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-wkh9p in node crc\\\\nF0129 06:36:32.117123 6936 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:32Z is after 2025-08-24T17:21:41Z]\\\\nI0129 06:36\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:36:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jpcvl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5xdwl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.304846 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-m9rgs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d15f2c9b-c70f-4574-b682-aeed3426f2c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56e9df7a0dfb76feff8ff096960ed5a32a31110d1e9397cd342633105b521562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h5nvp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:33Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-m9rgs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.321789 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d72d8dc-6f15-4586-acde-6e8ca7b60c12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://499bd4851e4ff980f14c47db56220e6481b754881c8e1b8641f6a6f6ea9b9644\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd7e82d8e9645833053e71734f3dc3ea511cb346622b482426efd837fc2a9716\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\
\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j8c77\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2pbqf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.338113 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.338167 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.338179 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.338199 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.338215 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:59Z","lastTransitionTime":"2026-01-29T06:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.339871 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ab4064a4-3eb6-40a4-8265-659277a0c89c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44f4b84198cb878a248b3daa7c766de056644c4308280a03f6ebb1d9221358d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b21d63e1683dcdd8f31ab08fb5ece2680c36d64e40f339a74e614146ceb297b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b21d63e1683dcdd8f31ab08fb5ece2680c36d64e40f339a74e614146ceb297b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.351541 4861 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.366446 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bccb8691-d6c8-4698-98ec-1f20073e61c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://891a26148b593708968e95fceded2868a9dc9914dc407179c2c19fc27354094f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f31bb3d56eb36fe2ed450fa018e059f5e08f3b24d46f38c1ce76927d81bbfdc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e8cab422458101032f91812b04982a6ec7e77257f1453129431d46498b7a4d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5be33ef4b92e40dad35790ed39dd9342c1dc71a97ec3e42217ee521e21411361\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://074eadc42e40bd4851235c184ec2df38c373fcfcda7f17a85674a6dadf33e81a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://92977de487cc6c0f6c0b44e6d711e4dda5157d48ad0d4b9705850fc1f6a617a2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e49e1ff2fe9903ae92d3ece45e6ea58b6c1d6a55325a78121701a778fa943d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5j282\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6dfzk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.383686 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fef849ed-aafe-4585-8303-30debc6307be\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d5503e9ee66c7ef8b6927d6885a08599e816cf2aa288135d73bb040faa966e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://022c796f3b0d3339775825d25b2298001519876127359599ff4a4efd7e72dca2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3809322389c905c4ecfb51c6ff48a202f4c97396b4252ea8be4b593caad919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c8a7e09738a804da41cbf174c6525872146b65
a17b3a7fecf64b7cb946da001\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://521e395aa78a3a709c497886a881f879e5c8a7394e741e15d593d40378e668f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36c6928ff7a7225f146dbfb1c19b90de69497499e33eb6dfe5b739503f8cd9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36c6928ff7a7225f146dbfb1c19b90de69497499e33eb6dfe5b739503f8cd9a7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://634fa5f7681478f2a6e2efb1967bcc201c9b13cdf923965df5b74e8b697eacd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://634fa5f7681478f2a6e2efb1967bcc201c9b13cdf923965df5b74e8b697eacd7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d19dfe15890316ee84f57f389b2a5eff2fe1979e21e9d1623fe8913ed2be607e\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d19dfe15890316ee84f57f389b2a5eff2fe1979e21e9d1623fe8913ed2be607e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.393998 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f3779eb-2fb8-4894-bda8-dc381dbb768b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:36:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8d063ea99e72f14e8dcdde29d68e7a04eb1dbbd00105589f5b90052476bba11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://09955115e76447c165af5032d39e2999bb6d28430e5a638652c01f6fe96ba6cc\\\",\\\"image\\\":\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://908672a0cad6f1bb143a290373a21155608ef253d6269be32d685aac43c8e5a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8493932bcf173f84c7dd1afd13306266a8441550482ea183df7e8a255d5ff5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.412929 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18d6810be5ec8518a09fe95a4aaa48b7619d41edc2d2bfbde5ff6a172dda160a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.428819 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fc70726-e8f8-40d8-b31f-2853e3e856d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5401a82efd76066014299b9ce890dc1d6433ad0bae14ce5788fa1191262c9a22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cfd4p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:31Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wkh9p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.439740 4861 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-rh69l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fb22f8f6-1210-4f39-8712-d33efc26239c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ggcwq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rh69l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.441025 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.441064 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 
06:36:59.441098 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.441118 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.441129 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:59Z","lastTransitionTime":"2026-01-29T06:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.457593 4861 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fead1b79-d9e2-4342-8ec9-039fd63d5a38\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T06:35:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T06:35:30Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0129 06:35:23.363366 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 06:35:23.368885 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2595163848/tls.crt::/tmp/serving-cert-2595163848/tls.key\\\\\\\"\\\\nI0129 06:35:29.089474 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 06:35:29.094395 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 06:35:29.094469 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 06:35:29.094533 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 06:35:29.094566 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 06:35:29.104659 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 06:35:29.104699 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104709 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 06:35:29.104720 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 06:35:29.104727 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 06:35:29.104733 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 06:35:29.104740 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 06:35:29.105109 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 06:35:29.107365 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T06:35:12Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T06:35:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T06:35:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T06:35:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T06:36:59Z is after 2025-08-24T17:21:41Z" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.543146 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.543191 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.543200 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.543225 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.543240 4861 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:59Z","lastTransitionTime":"2026-01-29T06:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.645226 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.645305 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.645328 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.645355 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.645373 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:59Z","lastTransitionTime":"2026-01-29T06:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.748498 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.748550 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.748563 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.748579 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.748593 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:59Z","lastTransitionTime":"2026-01-29T06:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.851898 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.851987 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.852012 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.852042 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.852065 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:59Z","lastTransitionTime":"2026-01-29T06:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.956094 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.956160 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.956176 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.956200 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:36:59 crc kubenswrapper[4861]: I0129 06:36:59.956217 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:36:59Z","lastTransitionTime":"2026-01-29T06:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.058027 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.058128 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.058147 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.058174 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.058193 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:00Z","lastTransitionTime":"2026-01-29T06:37:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.115734 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.115763 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.115849 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:00 crc kubenswrapper[4861]: E0129 06:37:00.115993 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.116028 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:00 crc kubenswrapper[4861]: E0129 06:37:00.116232 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:00 crc kubenswrapper[4861]: E0129 06:37:00.116305 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:00 crc kubenswrapper[4861]: E0129 06:37:00.116705 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.117005 4861 scope.go:117] "RemoveContainer" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a" Jan 29 06:37:00 crc kubenswrapper[4861]: E0129 06:37:00.117196 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.121193 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 14:19:35.204294476 +0000 UTC Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.160624 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.160693 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.160710 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.160741 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.160759 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:00Z","lastTransitionTime":"2026-01-29T06:37:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.263329 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.263404 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.263423 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.263449 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.263468 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:00Z","lastTransitionTime":"2026-01-29T06:37:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.365849 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.365922 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.365939 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.365967 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.365986 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:00Z","lastTransitionTime":"2026-01-29T06:37:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.469307 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.469366 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.469383 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.469406 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.469424 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:00Z","lastTransitionTime":"2026-01-29T06:37:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.572996 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.573350 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.573385 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.573411 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.573443 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:00Z","lastTransitionTime":"2026-01-29T06:37:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.676404 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.676466 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.676484 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.676508 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.676530 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:00Z","lastTransitionTime":"2026-01-29T06:37:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.779274 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.779339 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.779356 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.779384 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.779404 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:00Z","lastTransitionTime":"2026-01-29T06:37:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.881813 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.881882 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.881902 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.881927 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.881944 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:00Z","lastTransitionTime":"2026-01-29T06:37:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.984692 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.984764 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.984784 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.984813 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:00 crc kubenswrapper[4861]: I0129 06:37:00.984833 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:00Z","lastTransitionTime":"2026-01-29T06:37:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.087521 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.087576 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.087588 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.087609 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.087622 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:01Z","lastTransitionTime":"2026-01-29T06:37:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.121605 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 15:45:37.012607442 +0000 UTC
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.190364 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.190412 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.190432 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.190462 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.190483 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:01Z","lastTransitionTime":"2026-01-29T06:37:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.293908 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.293987 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.294003 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.294027 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.294040 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:01Z","lastTransitionTime":"2026-01-29T06:37:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.398428 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.398533 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.398557 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.398591 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.398609 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:01Z","lastTransitionTime":"2026-01-29T06:37:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.504659 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.504746 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.504758 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.504782 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.504800 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:01Z","lastTransitionTime":"2026-01-29T06:37:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.607730 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.607770 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.607778 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.607795 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.607809 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:01Z","lastTransitionTime":"2026-01-29T06:37:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.710968 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.711027 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.711043 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.711066 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.711109 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:01Z","lastTransitionTime":"2026-01-29T06:37:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.814284 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.814325 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.814333 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.814347 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.814358 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:01Z","lastTransitionTime":"2026-01-29T06:37:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.917754 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.917835 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.917869 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.917905 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:01 crc kubenswrapper[4861]: I0129 06:37:01.917925 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:01Z","lastTransitionTime":"2026-01-29T06:37:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.021359 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.021424 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.021447 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.021470 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.021488 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:02Z","lastTransitionTime":"2026-01-29T06:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.115559 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.115587 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.115733 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:37:02 crc kubenswrapper[4861]: E0129 06:37:02.115821 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.115886 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:37:02 crc kubenswrapper[4861]: E0129 06:37:02.116056 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:37:02 crc kubenswrapper[4861]: E0129 06:37:02.116506 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:02 crc kubenswrapper[4861]: E0129 06:37:02.116626 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.122621 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 09:38:34.69768446 +0000 UTC Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.124542 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.124581 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.124596 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.124656 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.124673 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:02Z","lastTransitionTime":"2026-01-29T06:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.228041 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.228172 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.228200 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.228232 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.228254 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:02Z","lastTransitionTime":"2026-01-29T06:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.332575 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.332673 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.332697 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.332734 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.332758 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:02Z","lastTransitionTime":"2026-01-29T06:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.436530 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.436574 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.436592 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.436615 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.436632 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:02Z","lastTransitionTime":"2026-01-29T06:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.540387 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.540459 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.540479 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.540507 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.540526 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:02Z","lastTransitionTime":"2026-01-29T06:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.644137 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.644201 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.644221 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.644250 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.644269 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:02Z","lastTransitionTime":"2026-01-29T06:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.747162 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.747235 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.747253 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.747280 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.747300 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:02Z","lastTransitionTime":"2026-01-29T06:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.849600 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.849981 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.850238 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.850470 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.850690 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:02Z","lastTransitionTime":"2026-01-29T06:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.953846 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.953929 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.953948 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.953977 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:02 crc kubenswrapper[4861]: I0129 06:37:02.954001 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:02Z","lastTransitionTime":"2026-01-29T06:37:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.058005 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.058151 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.058184 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.058228 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.058254 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:03Z","lastTransitionTime":"2026-01-29T06:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.123054 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 08:21:31.193817075 +0000 UTC
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.160961 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.161065 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.161136 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.161175 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.161202 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:03Z","lastTransitionTime":"2026-01-29T06:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.265259 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.265323 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.265342 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.265371 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.265393 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:03Z","lastTransitionTime":"2026-01-29T06:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.368897 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.368963 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.368982 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.369011 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.369029 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:03Z","lastTransitionTime":"2026-01-29T06:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.472728 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.472800 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.472819 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.472847 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.472870 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:03Z","lastTransitionTime":"2026-01-29T06:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.575651 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.575721 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.575739 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.575771 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.575792 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:03Z","lastTransitionTime":"2026-01-29T06:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.678960 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.679011 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.679029 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.679053 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.679102 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:03Z","lastTransitionTime":"2026-01-29T06:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.782773 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.783123 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.783286 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.783448 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.783589 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:03Z","lastTransitionTime":"2026-01-29T06:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.886677 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.886742 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.886762 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.886789 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.886809 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:03Z","lastTransitionTime":"2026-01-29T06:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.990976 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.991050 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.991113 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.991147 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:03 crc kubenswrapper[4861]: I0129 06:37:03.991171 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:03Z","lastTransitionTime":"2026-01-29T06:37:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.094553 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.094620 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.094638 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.094667 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.094708 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:04Z","lastTransitionTime":"2026-01-29T06:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.116608 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.116636 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.116665 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.116813 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 06:37:04 crc kubenswrapper[4861]: E0129 06:37:04.116840 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 06:37:04 crc kubenswrapper[4861]: E0129 06:37:04.116959 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 06:37:04 crc kubenswrapper[4861]: E0129 06:37:04.117136 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c"
Jan 29 06:37:04 crc kubenswrapper[4861]: E0129 06:37:04.117287 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.124334 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 20:14:09.383442736 +0000 UTC
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.199026 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.199126 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.199144 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.199172 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.199194 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:04Z","lastTransitionTime":"2026-01-29T06:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.302806 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.302891 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.302924 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.302961 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.302992 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:04Z","lastTransitionTime":"2026-01-29T06:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.406454 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.406532 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.406553 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.406582 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.406603 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:04Z","lastTransitionTime":"2026-01-29T06:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.510602 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.510646 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.510664 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.510693 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.510715 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:04Z","lastTransitionTime":"2026-01-29T06:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.615024 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.615123 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.615138 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.615161 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.615202 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:04Z","lastTransitionTime":"2026-01-29T06:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.717817 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.717889 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.717901 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.717920 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.717934 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:04Z","lastTransitionTime":"2026-01-29T06:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.821547 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.821614 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.821632 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.821665 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.821687 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:04Z","lastTransitionTime":"2026-01-29T06:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.926236 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.926317 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.926339 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.926370 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:04 crc kubenswrapper[4861]: I0129 06:37:04.926389 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:04Z","lastTransitionTime":"2026-01-29T06:37:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.030613 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.030669 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.030687 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.030714 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.030733 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:05Z","lastTransitionTime":"2026-01-29T06:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.124733 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 02:27:53.357395579 +0000 UTC
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.134676 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.134759 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.134781 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.134817 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.134837 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:05Z","lastTransitionTime":"2026-01-29T06:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.238615 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.238692 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.238711 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.238771 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.238791 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:05Z","lastTransitionTime":"2026-01-29T06:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.343052 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.343136 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.343157 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.343180 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.343197 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:05Z","lastTransitionTime":"2026-01-29T06:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.446894 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.446962 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.446982 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.447013 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.447038 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:05Z","lastTransitionTime":"2026-01-29T06:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.551003 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.551372 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.551412 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.551449 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.551473 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:05Z","lastTransitionTime":"2026-01-29T06:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.658011 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.658163 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.658184 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.658212 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.658231 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:05Z","lastTransitionTime":"2026-01-29T06:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.761434 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.761518 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.761547 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.761581 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.761608 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:05Z","lastTransitionTime":"2026-01-29T06:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.865186 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.865246 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.865264 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.865290 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.865311 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:05Z","lastTransitionTime":"2026-01-29T06:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.969375 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.969467 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.969492 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.969527 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:05 crc kubenswrapper[4861]: I0129 06:37:05.969550 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:05Z","lastTransitionTime":"2026-01-29T06:37:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.073314 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.073395 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.073413 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.073437 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.073458 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:06Z","lastTransitionTime":"2026-01-29T06:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.116334 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.116343 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.116461 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.116471 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:06 crc kubenswrapper[4861]: E0129 06:37:06.116600 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:06 crc kubenswrapper[4861]: E0129 06:37:06.116791 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:06 crc kubenswrapper[4861]: E0129 06:37:06.117276 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:06 crc kubenswrapper[4861]: E0129 06:37:06.117569 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.125726 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 18:06:21.014705376 +0000 UTC Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.176248 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.176321 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.176340 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.176365 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.176384 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:06Z","lastTransitionTime":"2026-01-29T06:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.279929 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.280005 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.280028 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.280055 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.280147 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:06Z","lastTransitionTime":"2026-01-29T06:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.382876 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.382931 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.382948 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.382969 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.382987 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:06Z","lastTransitionTime":"2026-01-29T06:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.485648 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.485714 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.485733 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.485759 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.485776 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:06Z","lastTransitionTime":"2026-01-29T06:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.588751 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.588813 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.588835 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.588864 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.588886 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:06Z","lastTransitionTime":"2026-01-29T06:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.692144 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.692220 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.692244 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.692277 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.692296 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:06Z","lastTransitionTime":"2026-01-29T06:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.795822 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.795888 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.795907 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.795935 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.795953 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:06Z","lastTransitionTime":"2026-01-29T06:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.922958 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.923035 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.923049 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.923097 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:06 crc kubenswrapper[4861]: I0129 06:37:06.923114 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:06Z","lastTransitionTime":"2026-01-29T06:37:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.026588 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.026656 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.026667 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.026688 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.026700 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:07Z","lastTransitionTime":"2026-01-29T06:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.126051 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 17:18:11.183774428 +0000 UTC Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.129418 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.129506 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.129524 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.129581 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.129604 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:07Z","lastTransitionTime":"2026-01-29T06:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.232496 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.233057 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.233107 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.233141 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.233163 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:07Z","lastTransitionTime":"2026-01-29T06:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.336067 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.336181 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.336204 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.336237 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.336263 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:07Z","lastTransitionTime":"2026-01-29T06:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.439317 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.439367 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.439381 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.439400 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.439413 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:07Z","lastTransitionTime":"2026-01-29T06:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.542497 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.542569 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.542590 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.542620 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.542640 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:07Z","lastTransitionTime":"2026-01-29T06:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.645166 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.645247 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.645273 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.645304 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.645329 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:07Z","lastTransitionTime":"2026-01-29T06:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.748690 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.748757 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.748774 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.748798 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.748816 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:07Z","lastTransitionTime":"2026-01-29T06:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.850433 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4942p_da8019d1-2d2c-493d-b80f-1d566eec9475/kube-multus/1.log" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.852853 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4942p_da8019d1-2d2c-493d-b80f-1d566eec9475/kube-multus/0.log" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.852942 4861 generic.go:334] "Generic (PLEG): container finished" podID="da8019d1-2d2c-493d-b80f-1d566eec9475" containerID="3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2" exitCode=1 Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.852999 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4942p" event={"ID":"da8019d1-2d2c-493d-b80f-1d566eec9475","Type":"ContainerDied","Data":"3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2"} Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.853155 4861 scope.go:117] "RemoveContainer" containerID="740c7625d6e07c1aa8caa1365aa8e7af04f2d6628b1cb38f4341509256aa36a8" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.853645 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.853696 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.853715 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.853739 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.853757 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:07Z","lastTransitionTime":"2026-01-29T06:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.854158 4861 scope.go:117] "RemoveContainer" containerID="3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2" Jan 29 06:37:07 crc kubenswrapper[4861]: E0129 06:37:07.854799 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-4942p_openshift-multus(da8019d1-2d2c-493d-b80f-1d566eec9475)\"" pod="openshift-multus/multus-4942p" podUID="da8019d1-2d2c-493d-b80f-1d566eec9475" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.908424 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=25.908354158 podStartE2EDuration="25.908354158s" podCreationTimestamp="2026-01-29 06:36:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:07.8855401 +0000 UTC m=+119.557034687" watchObservedRunningTime="2026-01-29 06:37:07.908354158 +0000 UTC m=+119.579848745" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.959044 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.959179 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.959206 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.959239 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:07 crc kubenswrapper[4861]: I0129 06:37:07.959266 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:07Z","lastTransitionTime":"2026-01-29T06:37:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.001710 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-m9rgs" podStartSLOduration=97.001683918 podStartE2EDuration="1m37.001683918s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:08.000855525 +0000 UTC m=+119.672350152" watchObservedRunningTime="2026-01-29 06:37:08.001683918 +0000 UTC m=+119.673178485" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.063331 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.063380 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.063393 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.063413 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.063426 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:08Z","lastTransitionTime":"2026-01-29T06:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.066844 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=15.066807722 podStartE2EDuration="15.066807722s" podCreationTimestamp="2026-01-29 06:36:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:08.063853032 +0000 UTC m=+119.735347659" watchObservedRunningTime="2026-01-29 06:37:08.066807722 +0000 UTC m=+119.738302309" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.067310 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2pbqf" podStartSLOduration=97.067299276 podStartE2EDuration="1m37.067299276s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:08.024120286 +0000 UTC m=+119.695614913" watchObservedRunningTime="2026-01-29 06:37:08.067299276 +0000 UTC m=+119.738793863" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.115890 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.115959 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.116022 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:08 crc kubenswrapper[4861]: E0129 06:37:08.116377 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.116446 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:08 crc kubenswrapper[4861]: E0129 06:37:08.116516 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:08 crc kubenswrapper[4861]: E0129 06:37:08.116675 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:08 crc kubenswrapper[4861]: E0129 06:37:08.116833 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.127047 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 06:31:40.180514409 +0000 UTC Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.144011 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-6dfzk" podStartSLOduration=97.143977644 podStartE2EDuration="1m37.143977644s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:08.118118853 +0000 UTC m=+119.789613480" watchObservedRunningTime="2026-01-29 06:37:08.143977644 +0000 UTC m=+119.815472241" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.144367 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=97.144358574 podStartE2EDuration="1m37.144358574s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:08.143732767 +0000 UTC m=+119.815227354" watchObservedRunningTime="2026-01-29 06:37:08.144358574 +0000 UTC m=+119.815853171" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.167051 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.167199 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.167223 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.167249 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.167282 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:08Z","lastTransitionTime":"2026-01-29T06:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.167761 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=66.167730907 podStartE2EDuration="1m6.167730907s" podCreationTimestamp="2026-01-29 06:36:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:08.166935286 +0000 UTC m=+119.838429883" watchObservedRunningTime="2026-01-29 06:37:08.167730907 +0000 UTC m=+119.839225484" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.216167 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podStartSLOduration=97.216138428 podStartE2EDuration="1m37.216138428s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:08.215589143 +0000 UTC m=+119.887083700" watchObservedRunningTime="2026-01-29 06:37:08.216138428 +0000 UTC m=+119.887633025" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.269884 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.269925 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.269938 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.269952 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.269962 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:08Z","lastTransitionTime":"2026-01-29T06:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.286830 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=97.286810714 podStartE2EDuration="1m37.286810714s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:08.265300811 +0000 UTC m=+119.936795388" watchObservedRunningTime="2026-01-29 06:37:08.286810714 +0000 UTC m=+119.958305271" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.314235 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-2kbk8" podStartSLOduration=97.314220726 podStartE2EDuration="1m37.314220726s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:08.313513477 +0000 UTC m=+119.985008034" watchObservedRunningTime="2026-01-29 06:37:08.314220726 +0000 UTC m=+119.985715283" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.372837 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.372881 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.372890 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.372905 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.372915 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:08Z","lastTransitionTime":"2026-01-29T06:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.475512 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.475594 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.475613 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.475644 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.475663 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:08Z","lastTransitionTime":"2026-01-29T06:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.578616 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.578685 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.578702 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.578728 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.578751 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:08Z","lastTransitionTime":"2026-01-29T06:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.682280 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.682340 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.682352 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.682376 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.682392 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:08Z","lastTransitionTime":"2026-01-29T06:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.754406 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.754547 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.754577 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.754613 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.754828 4861 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T06:37:08Z","lastTransitionTime":"2026-01-29T06:37:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
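
Note on the repeated setters.go entries above: the kubelet keeps republishing a NotReady condition because the container runtime (CRI-O on this CRC node) reports NetworkReady=false over CRI while /etc/kubernetes/cni/net.d/ holds no CNI configuration. Purely for illustration, the sketch below writes the kind of minimal *.conflist file that directory is expected to contain; the network name, bridge, and subnet are hypothetical, and on this cluster the Multus/OVN-Kubernetes pods generate the real configuration once they start.

    # Hypothetical example only -- not this cluster's real network config.
    # Requires root on the node; shown to illustrate the file format libcni loads.
    import json

    conflist = {
        "cniVersion": "0.4.0",
        "name": "example-bridge-net",   # hypothetical network name
        "plugins": [{
            "type": "bridge",           # reference CNI bridge plugin
            "bridge": "cni0",
            "isGateway": True,
            "ipMasq": True,
            "ipam": {"type": "host-local", "subnet": "10.88.0.0/16"},  # hypothetical subnet
        }],
    }

    # The runtime watches this directory; once a valid conflist appears it
    # reports NetworkReady=true over CRI and the condition above clears.
    with open("/etc/kubernetes/cni/net.d/10-example.conflist", "w") as f:
        json.dump(conflist, f, indent=2)
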
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.826290 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff"]
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.827010 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.830140 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.830636 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.832552 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.836667 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.841828 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/41391fb0-4b47-4d50-9508-c22aae1ef1ae-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.841901 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/41391fb0-4b47-4d50-9508-c22aae1ef1ae-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.841963 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41391fb0-4b47-4d50-9508-c22aae1ef1ae-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.842036 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/41391fb0-4b47-4d50-9508-c22aae1ef1ae-service-ca\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.842241 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/41391fb0-4b47-4d50-9508-c22aae1ef1ae-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff"
Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.863003 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4942p_da8019d1-2d2c-493d-b80f-1d566eec9475/kube-multus/1.log"
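
The reconciler_common.go entries above trace the first half of the kubelet's two-phase volume handling for the newly added cluster-version-operator pod: each volume in the pod spec is first verified as attached, then mounted (the MountVolume entries that follow). A toy sketch of that desired-state-versus-actual-state loop, reusing the volume names from the log; this is a reading aid, not the kubelet's implementation:

    # Toy model: reconcile desired volumes against what is already mounted.
    desired = ["etc-cvo-updatepayloads", "kube-api-access", "serving-cert",
               "service-ca", "etc-ssl-certs"]   # from the pod spec entries above
    mounted: set[str] = set()

    for volume in desired:
        if volume in mounted:
            continue
        # Phase 1: VerifyControllerAttachedVolume -- for non-attachable types
        # like host-path, configmap, secret, and projected volumes this is
        # effectively a pass-through; attachable volumes would wait here.
        # Phase 2: MountVolume.SetUp performs the actual mount/projection.
        mounted.add(volume)
        print(f'MountVolume.SetUp succeeded for volume "{volume}"')
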
path="/var/log/pods/openshift-multus_multus-4942p_da8019d1-2d2c-493d-b80f-1d566eec9475/kube-multus/1.log" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.943386 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/41391fb0-4b47-4d50-9508-c22aae1ef1ae-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.943527 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/41391fb0-4b47-4d50-9508-c22aae1ef1ae-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.943575 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41391fb0-4b47-4d50-9508-c22aae1ef1ae-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.943613 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/41391fb0-4b47-4d50-9508-c22aae1ef1ae-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.943662 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/41391fb0-4b47-4d50-9508-c22aae1ef1ae-service-ca\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.943698 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/41391fb0-4b47-4d50-9508-c22aae1ef1ae-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.943648 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/41391fb0-4b47-4d50-9508-c22aae1ef1ae-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.945930 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/41391fb0-4b47-4d50-9508-c22aae1ef1ae-service-ca\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" Jan 29 06:37:08 
crc kubenswrapper[4861]: I0129 06:37:08.954628 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41391fb0-4b47-4d50-9508-c22aae1ef1ae-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" Jan 29 06:37:08 crc kubenswrapper[4861]: I0129 06:37:08.975155 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/41391fb0-4b47-4d50-9508-c22aae1ef1ae-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-shdff\" (UID: \"41391fb0-4b47-4d50-9508-c22aae1ef1ae\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" Jan 29 06:37:09 crc kubenswrapper[4861]: E0129 06:37:09.095712 4861 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 29 06:37:09 crc kubenswrapper[4861]: I0129 06:37:09.127719 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 10:13:14.171411086 +0000 UTC Jan 29 06:37:09 crc kubenswrapper[4861]: I0129 06:37:09.127961 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 29 06:37:09 crc kubenswrapper[4861]: I0129 06:37:09.139280 4861 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 29 06:37:09 crc kubenswrapper[4861]: I0129 06:37:09.155899 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 29 06:37:09 crc kubenswrapper[4861]: I0129 06:37:09.161308 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" Jan 29 06:37:09 crc kubenswrapper[4861]: W0129 06:37:09.185461 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41391fb0_4b47_4d50_9508_c22aae1ef1ae.slice/crio-34e497588a5c77b83d4ab965491593dcd4ed0c01ccc7dca67f9922fcc066b3ac WatchSource:0}: Error finding container 34e497588a5c77b83d4ab965491593dcd4ed0c01ccc7dca67f9922fcc066b3ac: Status 404 returned error can't find the container with id 34e497588a5c77b83d4ab965491593dcd4ed0c01ccc7dca67f9922fcc066b3ac Jan 29 06:37:09 crc kubenswrapper[4861]: E0129 06:37:09.233722 4861 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Jan 29 06:37:09 crc kubenswrapper[4861]: I0129 06:37:09.869438 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" event={"ID":"41391fb0-4b47-4d50-9508-c22aae1ef1ae","Type":"ContainerStarted","Data":"0c9a0f978a501627fe624367de2afb5ed2bd750cd83288cb35e78b90421c578d"} Jan 29 06:37:09 crc kubenswrapper[4861]: I0129 06:37:09.869510 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" event={"ID":"41391fb0-4b47-4d50-9508-c22aae1ef1ae","Type":"ContainerStarted","Data":"34e497588a5c77b83d4ab965491593dcd4ed0c01ccc7dca67f9922fcc066b3ac"} Jan 29 06:37:09 crc kubenswrapper[4861]: I0129 06:37:09.896791 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shdff" podStartSLOduration=98.896770534 podStartE2EDuration="1m38.896770534s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:09.895023926 +0000 UTC m=+121.566518523" watchObservedRunningTime="2026-01-29 06:37:09.896770534 +0000 UTC m=+121.568265121" Jan 29 06:37:10 crc kubenswrapper[4861]: I0129 06:37:10.115728 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:10 crc kubenswrapper[4861]: I0129 06:37:10.115785 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:10 crc kubenswrapper[4861]: E0129 06:37:10.115984 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:10 crc kubenswrapper[4861]: E0129 06:37:10.116162 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:10 crc kubenswrapper[4861]: I0129 06:37:10.116615 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:10 crc kubenswrapper[4861]: I0129 06:37:10.116741 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:10 crc kubenswrapper[4861]: E0129 06:37:10.117152 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:10 crc kubenswrapper[4861]: E0129 06:37:10.116933 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:12 crc kubenswrapper[4861]: I0129 06:37:12.116213 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:12 crc kubenswrapper[4861]: I0129 06:37:12.116255 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:12 crc kubenswrapper[4861]: E0129 06:37:12.116399 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:12 crc kubenswrapper[4861]: I0129 06:37:12.116608 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:12 crc kubenswrapper[4861]: I0129 06:37:12.116659 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:12 crc kubenswrapper[4861]: E0129 06:37:12.117369 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:12 crc kubenswrapper[4861]: E0129 06:37:12.117473 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:12 crc kubenswrapper[4861]: E0129 06:37:12.117589 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:12 crc kubenswrapper[4861]: I0129 06:37:12.117901 4861 scope.go:117] "RemoveContainer" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a" Jan 29 06:37:12 crc kubenswrapper[4861]: E0129 06:37:12.118298 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5xdwl_openshift-ovn-kubernetes(c6ece014-5432-4877-9449-4253d6124c73)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" Jan 29 06:37:14 crc kubenswrapper[4861]: I0129 06:37:14.115734 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:14 crc kubenswrapper[4861]: I0129 06:37:14.115776 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:14 crc kubenswrapper[4861]: E0129 06:37:14.115858 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:14 crc kubenswrapper[4861]: I0129 06:37:14.115730 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:14 crc kubenswrapper[4861]: I0129 06:37:14.115802 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:14 crc kubenswrapper[4861]: E0129 06:37:14.115980 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:14 crc kubenswrapper[4861]: E0129 06:37:14.116100 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:14 crc kubenswrapper[4861]: E0129 06:37:14.116201 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:14 crc kubenswrapper[4861]: E0129 06:37:14.234957 4861 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 29 06:37:16 crc kubenswrapper[4861]: I0129 06:37:16.115843 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:16 crc kubenswrapper[4861]: I0129 06:37:16.115842 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:16 crc kubenswrapper[4861]: E0129 06:37:16.116492 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:16 crc kubenswrapper[4861]: I0129 06:37:16.115911 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:16 crc kubenswrapper[4861]: I0129 06:37:16.115808 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:16 crc kubenswrapper[4861]: E0129 06:37:16.116723 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:16 crc kubenswrapper[4861]: E0129 06:37:16.116834 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:16 crc kubenswrapper[4861]: E0129 06:37:16.116875 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:18 crc kubenswrapper[4861]: I0129 06:37:18.115627 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:18 crc kubenswrapper[4861]: I0129 06:37:18.115715 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:18 crc kubenswrapper[4861]: I0129 06:37:18.115650 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:18 crc kubenswrapper[4861]: E0129 06:37:18.115855 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:18 crc kubenswrapper[4861]: E0129 06:37:18.116239 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:18 crc kubenswrapper[4861]: I0129 06:37:18.115870 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:18 crc kubenswrapper[4861]: E0129 06:37:18.116376 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:18 crc kubenswrapper[4861]: E0129 06:37:18.116508 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:19 crc kubenswrapper[4861]: E0129 06:37:19.236498 4861 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 29 06:37:20 crc kubenswrapper[4861]: I0129 06:37:20.115596 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:20 crc kubenswrapper[4861]: I0129 06:37:20.115678 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:20 crc kubenswrapper[4861]: E0129 06:37:20.115828 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:20 crc kubenswrapper[4861]: I0129 06:37:20.115868 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:20 crc kubenswrapper[4861]: I0129 06:37:20.115912 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:20 crc kubenswrapper[4861]: E0129 06:37:20.116113 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:20 crc kubenswrapper[4861]: E0129 06:37:20.116220 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:20 crc kubenswrapper[4861]: E0129 06:37:20.116364 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:22 crc kubenswrapper[4861]: I0129 06:37:22.116044 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:22 crc kubenswrapper[4861]: E0129 06:37:22.116218 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:22 crc kubenswrapper[4861]: I0129 06:37:22.116308 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:22 crc kubenswrapper[4861]: I0129 06:37:22.116304 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:22 crc kubenswrapper[4861]: E0129 06:37:22.116452 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:22 crc kubenswrapper[4861]: I0129 06:37:22.116571 4861 scope.go:117] "RemoveContainer" containerID="3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2" Jan 29 06:37:22 crc kubenswrapper[4861]: E0129 06:37:22.116814 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:22 crc kubenswrapper[4861]: I0129 06:37:22.116841 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:22 crc kubenswrapper[4861]: E0129 06:37:22.116967 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:22 crc kubenswrapper[4861]: I0129 06:37:22.918872 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4942p_da8019d1-2d2c-493d-b80f-1d566eec9475/kube-multus/1.log" Jan 29 06:37:22 crc kubenswrapper[4861]: I0129 06:37:22.919288 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4942p" event={"ID":"da8019d1-2d2c-493d-b80f-1d566eec9475","Type":"ContainerStarted","Data":"22314fbcbe190aa61fb61652edc5f6d76649483f0d312e78cf43fb8b4fa49d7e"} Jan 29 06:37:24 crc kubenswrapper[4861]: I0129 06:37:24.115693 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:24 crc kubenswrapper[4861]: I0129 06:37:24.115747 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:24 crc kubenswrapper[4861]: E0129 06:37:24.115961 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:24 crc kubenswrapper[4861]: I0129 06:37:24.116033 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:24 crc kubenswrapper[4861]: E0129 06:37:24.116255 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:24 crc kubenswrapper[4861]: E0129 06:37:24.116403 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:24 crc kubenswrapper[4861]: I0129 06:37:24.116452 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:24 crc kubenswrapper[4861]: E0129 06:37:24.116884 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:24 crc kubenswrapper[4861]: E0129 06:37:24.237561 4861 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 29 06:37:25 crc kubenswrapper[4861]: I0129 06:37:25.117347 4861 scope.go:117] "RemoveContainer" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a" Jan 29 06:37:25 crc kubenswrapper[4861]: I0129 06:37:25.932875 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/3.log" Jan 29 06:37:25 crc kubenswrapper[4861]: I0129 06:37:25.936554 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerStarted","Data":"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"} Jan 29 06:37:25 crc kubenswrapper[4861]: I0129 06:37:25.937063 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:37:25 crc kubenswrapper[4861]: I0129 06:37:25.970035 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podStartSLOduration=114.970010668 podStartE2EDuration="1m54.970010668s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:25.96970991 +0000 UTC m=+137.641204487" watchObservedRunningTime="2026-01-29 06:37:25.970010668 +0000 UTC m=+137.641505255" Jan 29 06:37:25 crc kubenswrapper[4861]: I0129 06:37:25.971066 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-4942p" podStartSLOduration=114.971053965 podStartE2EDuration="1m54.971053965s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:22.944959568 +0000 UTC 
m=+134.616454185" watchObservedRunningTime="2026-01-29 06:37:25.971053965 +0000 UTC m=+137.642548552" Jan 29 06:37:26 crc kubenswrapper[4861]: I0129 06:37:26.116122 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:26 crc kubenswrapper[4861]: I0129 06:37:26.116184 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:26 crc kubenswrapper[4861]: I0129 06:37:26.116127 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:26 crc kubenswrapper[4861]: I0129 06:37:26.116122 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:26 crc kubenswrapper[4861]: E0129 06:37:26.116247 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:26 crc kubenswrapper[4861]: E0129 06:37:26.116346 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:26 crc kubenswrapper[4861]: E0129 06:37:26.116514 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:26 crc kubenswrapper[4861]: E0129 06:37:26.116850 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:26 crc kubenswrapper[4861]: I0129 06:37:26.116862 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-rh69l"] Jan 29 06:37:26 crc kubenswrapper[4861]: I0129 06:37:26.940949 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:26 crc kubenswrapper[4861]: E0129 06:37:26.941535 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:28 crc kubenswrapper[4861]: I0129 06:37:28.115797 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:28 crc kubenswrapper[4861]: I0129 06:37:28.115854 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:28 crc kubenswrapper[4861]: I0129 06:37:28.115885 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:28 crc kubenswrapper[4861]: E0129 06:37:28.116027 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rh69l" podUID="fb22f8f6-1210-4f39-8712-d33efc26239c" Jan 29 06:37:28 crc kubenswrapper[4861]: I0129 06:37:28.116063 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:28 crc kubenswrapper[4861]: E0129 06:37:28.116321 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 06:37:28 crc kubenswrapper[4861]: E0129 06:37:28.116399 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 06:37:28 crc kubenswrapper[4861]: E0129 06:37:28.116507 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.294629 4861 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.373431 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-q6lpc"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.374350 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.375751 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.376569 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.380770 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.380978 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.381033 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.381346 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.381864 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.382056 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.382300 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.382461 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.382592 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.382736 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.382925 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.383137 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.385399 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.386029 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.386633 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pzvfs"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.387267 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.390122 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.391317 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-r2hxt"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.394211 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.394536 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.395339 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.396828 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.397469 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.397546 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.397659 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.397868 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.397898 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.397986 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.398059 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.398878 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.399522 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.399707 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-s46nf"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.400211 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.400975 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.401460 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.401496 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.401761 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.408032 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.408293 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.408432 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.408878 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.409260 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.409319 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.409411 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.409552 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.409674 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.409779 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.409917 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.409925 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.409955 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.410045 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.414502 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.414794 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.415040 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.430183 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.435377 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.435588 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.435669 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-hrbzh"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.436579 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.441999 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.442475 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.442775 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.442968 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.442999 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.443047 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.443347 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.443367 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.443784 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.443842 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.444020 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.444121 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.444252 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.444375 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.444882 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.444982 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.445823 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.445973 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 29 06:37:29 crc 
kubenswrapper[4861]: I0129 06:37:29.447726 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.447818 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.468229 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.469064 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-qfjmk"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.469545 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.469982 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.470329 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-qfjmk"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.470393 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.470645 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-24c9r"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.471085 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-jng8m"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.471376 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-jng8m"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.471459 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.471591 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.471855 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-rxbpq"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.472294 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.475705 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.475815 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.476768 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.477459 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.478588 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.479727 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.480998 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.481461 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.482699 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.482790 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.482925 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483014 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483405 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483737 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-dir\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483773 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7vn6\" (UniqueName: \"kubernetes.io/projected/3155916f-0fd6-4e27-9ddc-d0cff45ae575-kube-api-access-n7vn6\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483793 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9c7d54e5-91df-4e16-ae91-a4310316c572-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483807 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483825 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/e5e6daea-2570-40a3-8139-045ce39e48f1-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8lw4d\" (UID: \"e5e6daea-2570-40a3-8139-045ce39e48f1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483842 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483856 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9c7d54e5-91df-4e16-ae91-a4310316c572-audit-policies\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483872 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483888 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483904 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483918 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3155916f-0fd6-4e27-9ddc-d0cff45ae575-etcd-client\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483931 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3155916f-0fd6-4e27-9ddc-d0cff45ae575-audit-dir\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483953 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-config\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483968 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-image-import-ca\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.483985 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-trusted-ca-bundle\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484006 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484022 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee0c951e-d5e0-40f6-8591-e6251fd23376-config\") pod \"openshift-apiserver-operator-796bbdcf4f-4mzns\" (UID: \"ee0c951e-d5e0-40f6-8591-e6251fd23376\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484038 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-serving-cert\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484051 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9c7d54e5-91df-4e16-ae91-a4310316c572-audit-dir\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484065 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9z77b\" (UniqueName: \"kubernetes.io/projected/ee0c951e-d5e0-40f6-8591-e6251fd23376-kube-api-access-9z77b\") pod \"openshift-apiserver-operator-796bbdcf4f-4mzns\" (UID: \"ee0c951e-d5e0-40f6-8591-e6251fd23376\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484094 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-client-ca\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484111 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qpzm\" (UniqueName: \"kubernetes.io/projected/9c7d54e5-91df-4e16-ae91-a4310316c572-kube-api-access-5qpzm\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484125 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484142 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtgm6\" (UniqueName: \"kubernetes.io/projected/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-kube-api-access-dtgm6\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484157 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-etcd-serving-ca\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484173 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sthrp\" (UniqueName: \"kubernetes.io/projected/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-kube-api-access-sthrp\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484196 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp9fw\" (UniqueName: \"kubernetes.io/projected/ffe2c087-e478-43ea-89b2-af4c64778c35-kube-api-access-kp9fw\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484213 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c7d54e5-91df-4e16-ae91-a4310316c572-serving-cert\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484227 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ht2kb\" (UniqueName: \"kubernetes.io/projected/a8ab995e-981b-45e3-a79e-481f09d9e9d6-kube-api-access-ht2kb\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484242 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a8ab995e-981b-45e3-a79e-481f09d9e9d6-auth-proxy-config\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484257 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484271 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9c7d54e5-91df-4e16-ae91-a4310316c572-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484287 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484301 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8ab995e-981b-45e3-a79e-481f09d9e9d6-config\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484316 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-client-ca\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484331 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484345 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-config\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484359 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3155916f-0fd6-4e27-9ddc-d0cff45ae575-node-pullsecrets\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484374 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59ktg\" (UniqueName: \"kubernetes.io/projected/407f7505-8386-467a-9b71-e1aea70b9c3d-kube-api-access-59ktg\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484389 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a8ab995e-981b-45e3-a79e-481f09d9e9d6-machine-approver-tls\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484403 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484418 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-config\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484438 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9c7d54e5-91df-4e16-ae91-a4310316c572-encryption-config\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484453 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484468 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-config\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484482 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-images\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484496 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee0c951e-d5e0-40f6-8591-e6251fd23376-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-4mzns\" (UID: \"ee0c951e-d5e0-40f6-8591-e6251fd23376\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484511 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3155916f-0fd6-4e27-9ddc-d0cff45ae575-encryption-config\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484535 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484553 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3155916f-0fd6-4e27-9ddc-d0cff45ae575-serving-cert\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484566 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9c7d54e5-91df-4e16-ae91-a4310316c572-etcd-client\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484582 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4966\" (UniqueName: \"kubernetes.io/projected/e5e6daea-2570-40a3-8139-045ce39e48f1-kube-api-access-b4966\") pod \"cluster-samples-operator-665b6dd947-8lw4d\" (UID: \"e5e6daea-2570-40a3-8139-045ce39e48f1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484597 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffe2c087-e478-43ea-89b2-af4c64778c35-serving-cert\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484611 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-audit\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484628 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-policies\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.484676 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-5d7b7"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.485050 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.485572 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.486227 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.486380 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.486705 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-bqw59"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.487044 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.487130 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-bqw59"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.487633 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.488358 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.494775 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.494929 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.495433 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.509493 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.509625 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.509725 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.516015 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.516244 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.516478 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.516623 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.518179 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.519288 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.524592 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.524859 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.525014 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.525215 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.525898 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.526120 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.526770 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.526827 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.527041 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.527144 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.527402 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.527563 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.527706 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.528107 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qsbrr"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.529738 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.529933 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.553369 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-9qj5w"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.553879 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.554289 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v4kqn"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.554643 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.556280 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.557097 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.558510 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.559236 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-9qj5w"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.560807 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.560908 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.561439 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.561773 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.561925 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.562044 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.562146 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.562416 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.563588 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.564029 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.564260 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.570032 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.570528 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7tpcj"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.570821 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.571188 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.571361 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.571483 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.572094 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.572540 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.579592 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.581029 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.581505 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.582219 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-f9vlr"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.582586 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.583265 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.583946 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.584160 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.584811 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585179 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qpzm\" (UniqueName: \"kubernetes.io/projected/9c7d54e5-91df-4e16-ae91-a4310316c572-kube-api-access-5qpzm\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585217 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585238 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtgm6\" (UniqueName: \"kubernetes.io/projected/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-kube-api-access-dtgm6\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585260 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-serving-cert\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585277 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6r4lh\" (UniqueName: \"kubernetes.io/projected/f7f7f0b2-fcab-4777-bd72-60bf1b3fede4-kube-api-access-6r4lh\") pod \"downloads-7954f5f757-jng8m\" (UID: \"f7f7f0b2-fcab-4777-bd72-60bf1b3fede4\") " pod="openshift-console/downloads-7954f5f757-jng8m"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585314 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-etcd-serving-ca\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585331 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glccd\" (UniqueName: \"kubernetes.io/projected/952eba6c-ad61-4a8e-9cae-77e3285fed37-kube-api-access-glccd\") pod \"machine-config-controller-84d6567774-fmh7v\" (UID: \"952eba6c-ad61-4a8e-9cae-77e3285fed37\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585345 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-service-ca\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585360 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/580b962b-dde1-4614-9f11-e3a6cb0dc860-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-lzhnd\" (UID: \"580b962b-dde1-4614-9f11-e3a6cb0dc860\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585380 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sthrp\" (UniqueName: \"kubernetes.io/projected/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-kube-api-access-sthrp\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585395 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9hrh\" (UniqueName: \"kubernetes.io/projected/998c663f-cba1-49d6-8685-14f84d3fa118-kube-api-access-v9hrh\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585410 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a627dba-db5e-46f8-b214-eb8a9d3e44ed-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gqnvz\" (UID: \"6a627dba-db5e-46f8-b214-eb8a9d3e44ed\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585435 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp9fw\" (UniqueName: \"kubernetes.io/projected/ffe2c087-e478-43ea-89b2-af4c64778c35-kube-api-access-kp9fw\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585451 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c7d54e5-91df-4e16-ae91-a4310316c572-serving-cert\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585467 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ht2kb\" (UniqueName: \"kubernetes.io/projected/a8ab995e-981b-45e3-a79e-481f09d9e9d6-kube-api-access-ht2kb\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585481 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-oauth-config\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585495 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-oauth-serving-cert\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585516 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585518 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585532 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a8ab995e-981b-45e3-a79e-481f09d9e9d6-auth-proxy-config\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585548 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-config\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585569 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9c7d54e5-91df-4e16-ae91-a4310316c572-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585764 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585784 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8ab995e-981b-45e3-a79e-481f09d9e9d6-config\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585804 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/998c663f-cba1-49d6-8685-14f84d3fa118-serving-cert\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585823 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/952eba6c-ad61-4a8e-9cae-77e3285fed37-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fmh7v\" (UID: \"952eba6c-ad61-4a8e-9cae-77e3285fed37\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585840 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-serving-cert\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585857 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mtwj\" (UniqueName: \"kubernetes.io/projected/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-kube-api-access-2mtwj\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585873 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f2f0a0e7-e725-462f-afe4-b76db9ba2864-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585890 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-client-ca\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585908 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.585989 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-config\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586006 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3155916f-0fd6-4e27-9ddc-d0cff45ae575-node-pullsecrets\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586022 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbbql\" (UniqueName: \"kubernetes.io/projected/5319e102-d348-45b0-ab8b-1cf7c04a7c54-kube-api-access-bbbql\") pod \"kube-storage-version-migrator-operator-b67b599dd-k9th8\" (UID: \"5319e102-d348-45b0-ab8b-1cf7c04a7c54\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586039 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-trusted-ca-bundle\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586058 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586088 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59ktg\" (UniqueName: \"kubernetes.io/projected/407f7505-8386-467a-9b71-e1aea70b9c3d-kube-api-access-59ktg\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586105 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a8ab995e-981b-45e3-a79e-481f09d9e9d6-machine-approver-tls\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586120 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/998c663f-cba1-49d6-8685-14f84d3fa118-config\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586135 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/913a3527-6d79-4441-8895-a9212004f20b-service-ca-bundle\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586152 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25d0880d-c427-464c-bd37-cda67e6502da-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-vjvgq\" (UID: \"25d0880d-c427-464c-bd37-cda67e6502da\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586166 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a627dba-db5e-46f8-b214-eb8a9d3e44ed-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gqnvz\" (UID: \"6a627dba-db5e-46f8-b214-eb8a9d3e44ed\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586192 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-config\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586209 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9c7d54e5-91df-4e16-ae91-a4310316c572-encryption-config\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586232 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-etcd-serving-ca\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586296 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586312 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-config\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586328 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-images\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586333 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"]
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586346 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gwjc\" (UniqueName: \"kubernetes.io/projected/efafc8de-1135-4405-bc1c-11c2efcdb99a-kube-api-access-2gwjc\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586448 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586470 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee0c951e-d5e0-40f6-8591-e6251fd23376-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-4mzns\" (UID: \"ee0c951e-d5e0-40f6-8591-e6251fd23376\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586494 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3155916f-0fd6-4e27-9ddc-d0cff45ae575-encryption-config\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586515 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5319e102-d348-45b0-ab8b-1cf7c04a7c54-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-k9th8\" (UID: \"5319e102-d348-45b0-ab8b-1cf7c04a7c54\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586532 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/952eba6c-ad61-4a8e-9cae-77e3285fed37-proxy-tls\") pod \"machine-config-controller-84d6567774-fmh7v\" (UID: \"952eba6c-ad61-4a8e-9cae-77e3285fed37\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586548 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25d0880d-c427-464c-bd37-cda67e6502da-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-vjvgq\" (UID: \"25d0880d-c427-464c-bd37-cda67e6502da\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586572 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3155916f-0fd6-4e27-9ddc-d0cff45ae575-serving-cert\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586588 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/efafc8de-1135-4405-bc1c-11c2efcdb99a-metrics-tls\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586607 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9c7d54e5-91df-4e16-ae91-a4310316c572-etcd-client\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586623 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/998c663f-cba1-49d6-8685-14f84d3fa118-etcd-service-ca\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586639 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rncp\" (UniqueName: \"kubernetes.io/projected/0071b780-153c-49f4-89c1-669953dab11b-kube-api-access-6rncp\") pod \"dns-operator-744455d44c-bqw59\" (UID: \"0071b780-153c-49f4-89c1-669953dab11b\") " pod="openshift-dns-operator/dns-operator-744455d44c-bqw59"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586659 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4966\" (UniqueName: \"kubernetes.io/projected/e5e6daea-2570-40a3-8139-045ce39e48f1-kube-api-access-b4966\") pod \"cluster-samples-operator-665b6dd947-8lw4d\" (UID: \"e5e6daea-2570-40a3-8139-045ce39e48f1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586674 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/998c663f-cba1-49d6-8685-14f84d3fa118-etcd-ca\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586690 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/efafc8de-1135-4405-bc1c-11c2efcdb99a-trusted-ca\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586818 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.586996 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/913a3527-6d79-4441-8895-a9212004f20b-config\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq"
Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587032 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName:
\"kubernetes.io/secret/ffe2c087-e478-43ea-89b2-af4c64778c35-serving-cert\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587055 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-audit\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587095 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/998c663f-cba1-49d6-8685-14f84d3fa118-etcd-client\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587115 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-policies\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587132 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/913a3527-6d79-4441-8895-a9212004f20b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587160 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f2f0a0e7-e725-462f-afe4-b76db9ba2864-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587178 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-dir\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587193 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0071b780-153c-49f4-89c1-669953dab11b-metrics-tls\") pod \"dns-operator-744455d44c-bqw59\" (UID: \"0071b780-153c-49f4-89c1-669953dab11b\") " pod="openshift-dns-operator/dns-operator-744455d44c-bqw59" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587208 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/580b962b-dde1-4614-9f11-e3a6cb0dc860-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-lzhnd\" (UID: 
\"580b962b-dde1-4614-9f11-e3a6cb0dc860\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587224 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/efafc8de-1135-4405-bc1c-11c2efcdb99a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587239 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-trusted-ca\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587259 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7vn6\" (UniqueName: \"kubernetes.io/projected/3155916f-0fd6-4e27-9ddc-d0cff45ae575-kube-api-access-n7vn6\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587276 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md9bj\" (UniqueName: \"kubernetes.io/projected/3a5a7122-9da5-4b48-9282-c90135f1b339-kube-api-access-md9bj\") pod \"openshift-config-operator-7777fb866f-24c9r\" (UID: \"3a5a7122-9da5-4b48-9282-c90135f1b339\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587297 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9c7d54e5-91df-4e16-ae91-a4310316c572-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587317 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587338 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/e5e6daea-2570-40a3-8139-045ce39e48f1-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8lw4d\" (UID: \"e5e6daea-2570-40a3-8139-045ce39e48f1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587357 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25d0880d-c427-464c-bd37-cda67e6502da-config\") pod \"kube-controller-manager-operator-78b949d7b-vjvgq\" (UID: 
\"25d0880d-c427-464c-bd37-cda67e6502da\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587378 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9c7d54e5-91df-4e16-ae91-a4310316c572-audit-policies\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587395 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587555 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a5a7122-9da5-4b48-9282-c90135f1b339-serving-cert\") pod \"openshift-config-operator-7777fb866f-24c9r\" (UID: \"3a5a7122-9da5-4b48-9282-c90135f1b339\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587578 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587595 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587616 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6a627dba-db5e-46f8-b214-eb8a9d3e44ed-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gqnvz\" (UID: \"6a627dba-db5e-46f8-b214-eb8a9d3e44ed\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587646 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587667 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3155916f-0fd6-4e27-9ddc-d0cff45ae575-etcd-client\") pod \"apiserver-76f77b778f-q6lpc\" (UID: 
\"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587681 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3155916f-0fd6-4e27-9ddc-d0cff45ae575-audit-dir\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587703 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szm4q\" (UniqueName: \"kubernetes.io/projected/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-kube-api-access-szm4q\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587719 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-q6lpc"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587722 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-config\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587767 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-image-import-ca\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587788 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/3a5a7122-9da5-4b48-9282-c90135f1b339-available-featuregates\") pod \"openshift-config-operator-7777fb866f-24c9r\" (UID: \"3a5a7122-9da5-4b48-9282-c90135f1b339\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587811 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-trusted-ca-bundle\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587831 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/913a3527-6d79-4441-8895-a9212004f20b-serving-cert\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587859 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587876 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee0c951e-d5e0-40f6-8591-e6251fd23376-config\") pod \"openshift-apiserver-operator-796bbdcf4f-4mzns\" (UID: \"ee0c951e-d5e0-40f6-8591-e6251fd23376\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587892 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-serving-cert\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587909 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/f2f0a0e7-e725-462f-afe4-b76db9ba2864-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587930 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9c7d54e5-91df-4e16-ae91-a4310316c572-audit-dir\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587948 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9z77b\" (UniqueName: \"kubernetes.io/projected/ee0c951e-d5e0-40f6-8591-e6251fd23376-kube-api-access-9z77b\") pod \"openshift-apiserver-operator-796bbdcf4f-4mzns\" (UID: \"ee0c951e-d5e0-40f6-8591-e6251fd23376\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587965 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-client-ca\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.587983 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tczs\" (UniqueName: \"kubernetes.io/projected/580b962b-dde1-4614-9f11-e3a6cb0dc860-kube-api-access-8tczs\") pod \"openshift-controller-manager-operator-756b6f6bc6-lzhnd\" (UID: \"580b962b-dde1-4614-9f11-e3a6cb0dc860\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.588004 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5319e102-d348-45b0-ab8b-1cf7c04a7c54-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-k9th8\" (UID: \"5319e102-d348-45b0-ab8b-1cf7c04a7c54\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.588021 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pp6l\" (UniqueName: \"kubernetes.io/projected/913a3527-6d79-4441-8895-a9212004f20b-kube-api-access-5pp6l\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.588039 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-config\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.588057 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gb9sv\" (UniqueName: \"kubernetes.io/projected/f2f0a0e7-e725-462f-afe4-b76db9ba2864-kube-api-access-gb9sv\") pod \"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.588220 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-config\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.588254 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-dir\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.588447 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-config\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.588584 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-policies\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.588905 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9c7d54e5-91df-4e16-ae91-a4310316c572-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.589135 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-client-ca\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.589224 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-image-import-ca\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.590158 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-images\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.590422 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3155916f-0fd6-4e27-9ddc-d0cff45ae575-node-pullsecrets\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.590521 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9c7d54e5-91df-4e16-ae91-a4310316c572-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.590631 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-config\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.590748 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9c7d54e5-91df-4e16-ae91-a4310316c572-audit-dir\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.591533 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.591601 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-trusted-ca-bundle\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.591794 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9c7d54e5-91df-4e16-ae91-a4310316c572-encryption-config\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.591816 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee0c951e-d5e0-40f6-8591-e6251fd23376-config\") pod \"openshift-apiserver-operator-796bbdcf4f-4mzns\" (UID: \"ee0c951e-d5e0-40f6-8591-e6251fd23376\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.591952 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a8ab995e-981b-45e3-a79e-481f09d9e9d6-auth-proxy-config\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.592268 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9c7d54e5-91df-4e16-ae91-a4310316c572-etcd-client\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.592553 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/e5e6daea-2570-40a3-8139-045ce39e48f1-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8lw4d\" (UID: \"e5e6daea-2570-40a3-8139-045ce39e48f1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.592842 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.593154 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9c7d54e5-91df-4e16-ae91-a4310316c572-audit-policies\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.593274 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.593455 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pzvfs"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.593559 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" 
(UniqueName: \"kubernetes.io/host-path/3155916f-0fd6-4e27-9ddc-d0cff45ae575-audit-dir\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.593661 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.593710 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a8ab995e-981b-45e3-a79e-481f09d9e9d6-machine-approver-tls\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.594084 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8ab995e-981b-45e3-a79e-481f09d9e9d6-config\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.594011 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3155916f-0fd6-4e27-9ddc-d0cff45ae575-encryption-config\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.594839 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3155916f-0fd6-4e27-9ddc-d0cff45ae575-audit\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.595167 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-client-ca\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.595254 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.595287 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee0c951e-d5e0-40f6-8591-e6251fd23376-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-4mzns\" (UID: \"ee0c951e-d5e0-40f6-8591-e6251fd23376\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.595484 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.596128 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.596210 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-config\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.596143 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.596444 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c7d54e5-91df-4e16-ae91-a4310316c572-serving-cert\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.596653 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3155916f-0fd6-4e27-9ddc-d0cff45ae575-serving-cert\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.596658 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.596692 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.597283 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffe2c087-e478-43ea-89b2-af4c64778c35-serving-cert\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.597490 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3155916f-0fd6-4e27-9ddc-d0cff45ae575-etcd-client\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.598020 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.598533 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-vvhvw"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.599055 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.599110 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-vvhvw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.599555 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-serving-cert\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.599680 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.599966 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-x82xx"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.600791 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.601394 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-hrbzh"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.609357 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.609547 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.614414 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.615595 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.616542 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.617131 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-24c9r"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.619291 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.624921 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qsbrr"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.626276 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-r2hxt"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.627422 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.628430 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-jng8m"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.629446 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-5d7b7"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.630515 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.633031 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-qfjmk"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.634129 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.635198 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-rxbpq"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.636605 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-s46nf"] Jan 29 06:37:29 crc 
kubenswrapper[4861]: I0129 06:37:29.636694 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.638001 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7tpcj"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.639203 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-2lxpk"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.639974 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-2lxpk" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.640325 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-wxx66"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.640919 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.641385 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v4kqn"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.642704 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-f9vlr"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.643900 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.644934 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.646210 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.647196 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-bqw59"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.648222 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.649289 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.650423 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-x82xx"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.651477 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.652872 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-wxx66"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.653708 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-vvhvw"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.654988 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.656044 4861 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.656105 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.657386 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.658506 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.659907 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw"] Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.676341 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689174 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-mountpoint-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689205 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flmzj\" (UniqueName: \"kubernetes.io/projected/3ee9ba97-39ba-4d37-a844-16bfa7d16afc-kube-api-access-flmzj\") pod \"ingress-canary-vvhvw\" (UID: \"3ee9ba97-39ba-4d37-a844-16bfa7d16afc\") " pod="openshift-ingress-canary/ingress-canary-vvhvw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689232 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tczs\" (UniqueName: \"kubernetes.io/projected/580b962b-dde1-4614-9f11-e3a6cb0dc860-kube-api-access-8tczs\") pod \"openshift-controller-manager-operator-756b6f6bc6-lzhnd\" (UID: \"580b962b-dde1-4614-9f11-e3a6cb0dc860\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689251 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5319e102-d348-45b0-ab8b-1cf7c04a7c54-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-k9th8\" (UID: \"5319e102-d348-45b0-ab8b-1cf7c04a7c54\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689268 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-config\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689285 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gb9sv\" (UniqueName: \"kubernetes.io/projected/f2f0a0e7-e725-462f-afe4-b76db9ba2864-kube-api-access-gb9sv\") pod 
\"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689301 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9xdh\" (UniqueName: \"kubernetes.io/projected/80eb150e-f53d-41ef-8338-41643b573ad1-kube-api-access-q9xdh\") pod \"migrator-59844c95c7-hvv84\" (UID: \"80eb150e-f53d-41ef-8338-41643b573ad1\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689319 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-apiservice-cert\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689340 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-serving-cert\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689356 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6r4lh\" (UniqueName: \"kubernetes.io/projected/f7f7f0b2-fcab-4777-bd72-60bf1b3fede4-kube-api-access-6r4lh\") pod \"downloads-7954f5f757-jng8m\" (UID: \"f7f7f0b2-fcab-4777-bd72-60bf1b3fede4\") " pod="openshift-console/downloads-7954f5f757-jng8m" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689386 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-service-ca\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689404 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29njp\" (UniqueName: \"kubernetes.io/projected/40e2a853-6297-48dc-85dd-7eccd47c2907-kube-api-access-29njp\") pod \"service-ca-operator-777779d784-cwtjn\" (UID: \"40e2a853-6297-48dc-85dd-7eccd47c2907\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689422 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whbzg\" (UniqueName: \"kubernetes.io/projected/666760cf-9fb4-415b-929e-14212d7cf828-kube-api-access-whbzg\") pod \"control-plane-machine-set-operator-78cbb6b69f-vhjkq\" (UID: \"666760cf-9fb4-415b-929e-14212d7cf828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689442 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9hrh\" (UniqueName: \"kubernetes.io/projected/998c663f-cba1-49d6-8685-14f84d3fa118-kube-api-access-v9hrh\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689481 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a627dba-db5e-46f8-b214-eb8a9d3e44ed-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gqnvz\" (UID: \"6a627dba-db5e-46f8-b214-eb8a9d3e44ed\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689528 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-oauth-config\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689557 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-config\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689580 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-socket-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689598 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/952eba6c-ad61-4a8e-9cae-77e3285fed37-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fmh7v\" (UID: \"952eba6c-ad61-4a8e-9cae-77e3285fed37\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689620 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/998c663f-cba1-49d6-8685-14f84d3fa118-serving-cert\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689637 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-serving-cert\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689658 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbbql\" (UniqueName: \"kubernetes.io/projected/5319e102-d348-45b0-ab8b-1cf7c04a7c54-kube-api-access-bbbql\") pod \"kube-storage-version-migrator-operator-b67b599dd-k9th8\" (UID: \"5319e102-d348-45b0-ab8b-1cf7c04a7c54\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689696 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/913a3527-6d79-4441-8895-a9212004f20b-service-ca-bundle\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689712 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25d0880d-c427-464c-bd37-cda67e6502da-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-vjvgq\" (UID: \"25d0880d-c427-464c-bd37-cda67e6502da\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689729 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a627dba-db5e-46f8-b214-eb8a9d3e44ed-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gqnvz\" (UID: \"6a627dba-db5e-46f8-b214-eb8a9d3e44ed\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689763 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gwjc\" (UniqueName: \"kubernetes.io/projected/efafc8de-1135-4405-bc1c-11c2efcdb99a-kube-api-access-2gwjc\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689785 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25d0880d-c427-464c-bd37-cda67e6502da-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-vjvgq\" (UID: \"25d0880d-c427-464c-bd37-cda67e6502da\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689802 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40e2a853-6297-48dc-85dd-7eccd47c2907-serving-cert\") pod \"service-ca-operator-777779d784-cwtjn\" (UID: \"40e2a853-6297-48dc-85dd-7eccd47c2907\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689820 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rncp\" (UniqueName: \"kubernetes.io/projected/0071b780-153c-49f4-89c1-669953dab11b-kube-api-access-6rncp\") pod \"dns-operator-744455d44c-bqw59\" (UID: \"0071b780-153c-49f4-89c1-669953dab11b\") " pod="openshift-dns-operator/dns-operator-744455d44c-bqw59" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689839 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/998c663f-cba1-49d6-8685-14f84d3fa118-etcd-ca\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689855 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" 
(UniqueName: \"kubernetes.io/configmap/efafc8de-1135-4405-bc1c-11c2efcdb99a-trusted-ca\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689870 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/913a3527-6d79-4441-8895-a9212004f20b-config\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689889 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/998c663f-cba1-49d6-8685-14f84d3fa118-etcd-client\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689906 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/913a3527-6d79-4441-8895-a9212004f20b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689923 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f2f0a0e7-e725-462f-afe4-b76db9ba2864-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689952 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/efafc8de-1135-4405-bc1c-11c2efcdb99a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689970 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md9bj\" (UniqueName: \"kubernetes.io/projected/3a5a7122-9da5-4b48-9282-c90135f1b339-kube-api-access-md9bj\") pod \"openshift-config-operator-7777fb866f-24c9r\" (UID: \"3a5a7122-9da5-4b48-9282-c90135f1b339\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.689989 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7edaa7aa-38fe-494a-84a9-f533d9710d1b-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-lbl9s\" (UID: \"7edaa7aa-38fe-494a-84a9-f533d9710d1b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690013 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a5a7122-9da5-4b48-9282-c90135f1b339-serving-cert\") pod 
\"openshift-config-operator-7777fb866f-24c9r\" (UID: \"3a5a7122-9da5-4b48-9282-c90135f1b339\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690030 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3ee9ba97-39ba-4d37-a844-16bfa7d16afc-cert\") pod \"ingress-canary-vvhvw\" (UID: \"3ee9ba97-39ba-4d37-a844-16bfa7d16afc\") " pod="openshift-ingress-canary/ingress-canary-vvhvw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690051 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-csi-data-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690081 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bee90beb-2799-47b4-8a1c-e8185a60e8d7-auth-proxy-config\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690104 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbd2b\" (UniqueName: \"kubernetes.io/projected/f095a543-8e25-4662-af3b-fa3cabdae61a-kube-api-access-fbd2b\") pod \"multus-admission-controller-857f4d67dd-qsbrr\" (UID: \"f095a543-8e25-4662-af3b-fa3cabdae61a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690119 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-service-ca\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690132 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/f2f0a0e7-e725-462f-afe4-b76db9ba2864-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690336 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npds5\" (UniqueName: \"kubernetes.io/projected/841455ff-8571-49ca-9aec-fb055f63bbef-kube-api-access-npds5\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690360 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pp6l\" (UniqueName: \"kubernetes.io/projected/913a3527-6d79-4441-8895-a9212004f20b-kube-api-access-5pp6l\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690385 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glccd\" (UniqueName: \"kubernetes.io/projected/952eba6c-ad61-4a8e-9cae-77e3285fed37-kube-api-access-glccd\") pod \"machine-config-controller-84d6567774-fmh7v\" (UID: \"952eba6c-ad61-4a8e-9cae-77e3285fed37\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690401 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/580b962b-dde1-4614-9f11-e3a6cb0dc860-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-lzhnd\" (UID: \"580b962b-dde1-4614-9f11-e3a6cb0dc860\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690471 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bee90beb-2799-47b4-8a1c-e8185a60e8d7-proxy-tls\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690507 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-oauth-serving-cert\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690555 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qc7vq\" (UniqueName: \"kubernetes.io/projected/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-kube-api-access-qc7vq\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690576 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq4dq\" (UniqueName: \"kubernetes.io/projected/bee90beb-2799-47b4-8a1c-e8185a60e8d7-kube-api-access-wq4dq\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690612 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mtwj\" (UniqueName: \"kubernetes.io/projected/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-kube-api-access-2mtwj\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690630 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f2f0a0e7-e725-462f-afe4-b76db9ba2864-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690648 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-webhook-cert\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690669 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/666760cf-9fb4-415b-929e-14212d7cf828-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-vhjkq\" (UID: \"666760cf-9fb4-415b-929e-14212d7cf828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690690 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-trusted-ca-bundle\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690704 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-tmpfs\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690722 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/998c663f-cba1-49d6-8685-14f84d3fa118-config\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690739 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-registration-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690756 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f095a543-8e25-4662-af3b-fa3cabdae61a-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qsbrr\" (UID: \"f095a543-8e25-4662-af3b-fa3cabdae61a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690772 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bee90beb-2799-47b4-8a1c-e8185a60e8d7-images\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:29 crc 
kubenswrapper[4861]: I0129 06:37:29.690789 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-plugins-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690813 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5319e102-d348-45b0-ab8b-1cf7c04a7c54-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-k9th8\" (UID: \"5319e102-d348-45b0-ab8b-1cf7c04a7c54\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690829 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/952eba6c-ad61-4a8e-9cae-77e3285fed37-proxy-tls\") pod \"machine-config-controller-84d6567774-fmh7v\" (UID: \"952eba6c-ad61-4a8e-9cae-77e3285fed37\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690845 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7edaa7aa-38fe-494a-84a9-f533d9710d1b-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-lbl9s\" (UID: \"7edaa7aa-38fe-494a-84a9-f533d9710d1b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690861 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/efafc8de-1135-4405-bc1c-11c2efcdb99a-metrics-tls\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690880 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/998c663f-cba1-49d6-8685-14f84d3fa118-etcd-service-ca\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690905 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7edaa7aa-38fe-494a-84a9-f533d9710d1b-config\") pod \"kube-apiserver-operator-766d6c64bb-lbl9s\" (UID: \"7edaa7aa-38fe-494a-84a9-f533d9710d1b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690922 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0071b780-153c-49f4-89c1-669953dab11b-metrics-tls\") pod \"dns-operator-744455d44c-bqw59\" (UID: \"0071b780-153c-49f4-89c1-669953dab11b\") " pod="openshift-dns-operator/dns-operator-744455d44c-bqw59" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690941 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/580b962b-dde1-4614-9f11-e3a6cb0dc860-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-lzhnd\" (UID: \"580b962b-dde1-4614-9f11-e3a6cb0dc860\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690956 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-trusted-ca\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.690987 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25d0880d-c427-464c-bd37-cda67e6502da-config\") pod \"kube-controller-manager-operator-78b949d7b-vjvgq\" (UID: \"25d0880d-c427-464c-bd37-cda67e6502da\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.691003 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40e2a853-6297-48dc-85dd-7eccd47c2907-config\") pod \"service-ca-operator-777779d784-cwtjn\" (UID: \"40e2a853-6297-48dc-85dd-7eccd47c2907\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.691024 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6a627dba-db5e-46f8-b214-eb8a9d3e44ed-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gqnvz\" (UID: \"6a627dba-db5e-46f8-b214-eb8a9d3e44ed\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.691051 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szm4q\" (UniqueName: \"kubernetes.io/projected/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-kube-api-access-szm4q\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.691066 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/3a5a7122-9da5-4b48-9282-c90135f1b339-available-featuregates\") pod \"openshift-config-operator-7777fb866f-24c9r\" (UID: \"3a5a7122-9da5-4b48-9282-c90135f1b339\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.691097 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/913a3527-6d79-4441-8895-a9212004f20b-serving-cert\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.691316 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-config\") 
pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.691987 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-serving-cert\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.692578 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/913a3527-6d79-4441-8895-a9212004f20b-config\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.692702 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/952eba6c-ad61-4a8e-9cae-77e3285fed37-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fmh7v\" (UID: \"952eba6c-ad61-4a8e-9cae-77e3285fed37\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.693295 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f2f0a0e7-e725-462f-afe4-b76db9ba2864-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.693765 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-config\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.693780 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/580b962b-dde1-4614-9f11-e3a6cb0dc860-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-lzhnd\" (UID: \"580b962b-dde1-4614-9f11-e3a6cb0dc860\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.694027 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/913a3527-6d79-4441-8895-a9212004f20b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.694286 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/913a3527-6d79-4441-8895-a9212004f20b-serving-cert\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: 
I0129 06:37:29.694645 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/3a5a7122-9da5-4b48-9282-c90135f1b339-available-featuregates\") pod \"openshift-config-operator-7777fb866f-24c9r\" (UID: \"3a5a7122-9da5-4b48-9282-c90135f1b339\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.694988 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-oauth-config\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.695049 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/998c663f-cba1-49d6-8685-14f84d3fa118-etcd-client\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.695366 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-oauth-serving-cert\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.695920 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-trusted-ca-bundle\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.695950 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-trusted-ca\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.696104 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/998c663f-cba1-49d6-8685-14f84d3fa118-etcd-service-ca\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.696231 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.696859 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/580b962b-dde1-4614-9f11-e3a6cb0dc860-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-lzhnd\" (UID: \"580b962b-dde1-4614-9f11-e3a6cb0dc860\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.697020 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" 
(UniqueName: \"kubernetes.io/secret/f2f0a0e7-e725-462f-afe4-b76db9ba2864-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.697237 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/952eba6c-ad61-4a8e-9cae-77e3285fed37-proxy-tls\") pod \"machine-config-controller-84d6567774-fmh7v\" (UID: \"952eba6c-ad61-4a8e-9cae-77e3285fed37\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.697299 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/913a3527-6d79-4441-8895-a9212004f20b-service-ca-bundle\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.697775 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/998c663f-cba1-49d6-8685-14f84d3fa118-serving-cert\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.700514 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a5a7122-9da5-4b48-9282-c90135f1b339-serving-cert\") pod \"openshift-config-operator-7777fb866f-24c9r\" (UID: \"3a5a7122-9da5-4b48-9282-c90135f1b339\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.706144 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/998c663f-cba1-49d6-8685-14f84d3fa118-config\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.715750 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.716624 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-serving-cert\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.720647 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/998c663f-cba1-49d6-8685-14f84d3fa118-etcd-ca\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.736337 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 
06:37:29.745211 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25d0880d-c427-464c-bd37-cda67e6502da-config\") pod \"kube-controller-manager-operator-78b949d7b-vjvgq\" (UID: \"25d0880d-c427-464c-bd37-cda67e6502da\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.756341 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.767486 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25d0880d-c427-464c-bd37-cda67e6502da-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-vjvgq\" (UID: \"25d0880d-c427-464c-bd37-cda67e6502da\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.777375 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.792989 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-mountpoint-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793033 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flmzj\" (UniqueName: \"kubernetes.io/projected/3ee9ba97-39ba-4d37-a844-16bfa7d16afc-kube-api-access-flmzj\") pod \"ingress-canary-vvhvw\" (UID: \"3ee9ba97-39ba-4d37-a844-16bfa7d16afc\") " pod="openshift-ingress-canary/ingress-canary-vvhvw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793086 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9xdh\" (UniqueName: \"kubernetes.io/projected/80eb150e-f53d-41ef-8338-41643b573ad1-kube-api-access-q9xdh\") pod \"migrator-59844c95c7-hvv84\" (UID: \"80eb150e-f53d-41ef-8338-41643b573ad1\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793128 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-apiservice-cert\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793153 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29njp\" (UniqueName: \"kubernetes.io/projected/40e2a853-6297-48dc-85dd-7eccd47c2907-kube-api-access-29njp\") pod \"service-ca-operator-777779d784-cwtjn\" (UID: \"40e2a853-6297-48dc-85dd-7eccd47c2907\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793174 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whbzg\" (UniqueName: 
\"kubernetes.io/projected/666760cf-9fb4-415b-929e-14212d7cf828-kube-api-access-whbzg\") pod \"control-plane-machine-set-operator-78cbb6b69f-vhjkq\" (UID: \"666760cf-9fb4-415b-929e-14212d7cf828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793231 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-socket-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793304 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40e2a853-6297-48dc-85dd-7eccd47c2907-serving-cert\") pod \"service-ca-operator-777779d784-cwtjn\" (UID: \"40e2a853-6297-48dc-85dd-7eccd47c2907\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793297 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-mountpoint-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793376 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7edaa7aa-38fe-494a-84a9-f533d9710d1b-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-lbl9s\" (UID: \"7edaa7aa-38fe-494a-84a9-f533d9710d1b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793544 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3ee9ba97-39ba-4d37-a844-16bfa7d16afc-cert\") pod \"ingress-canary-vvhvw\" (UID: \"3ee9ba97-39ba-4d37-a844-16bfa7d16afc\") " pod="openshift-ingress-canary/ingress-canary-vvhvw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793596 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-socket-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793600 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-csi-data-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793700 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bee90beb-2799-47b4-8a1c-e8185a60e8d7-auth-proxy-config\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793713 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-csi-data-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793741 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbd2b\" (UniqueName: \"kubernetes.io/projected/f095a543-8e25-4662-af3b-fa3cabdae61a-kube-api-access-fbd2b\") pod \"multus-admission-controller-857f4d67dd-qsbrr\" (UID: \"f095a543-8e25-4662-af3b-fa3cabdae61a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793812 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npds5\" (UniqueName: \"kubernetes.io/projected/841455ff-8571-49ca-9aec-fb055f63bbef-kube-api-access-npds5\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793908 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bee90beb-2799-47b4-8a1c-e8185a60e8d7-proxy-tls\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.793958 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qc7vq\" (UniqueName: \"kubernetes.io/projected/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-kube-api-access-qc7vq\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794002 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wq4dq\" (UniqueName: \"kubernetes.io/projected/bee90beb-2799-47b4-8a1c-e8185a60e8d7-kube-api-access-wq4dq\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794050 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-webhook-cert\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794109 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-tmpfs\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794150 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/666760cf-9fb4-415b-929e-14212d7cf828-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-vhjkq\" (UID: \"666760cf-9fb4-415b-929e-14212d7cf828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794193 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f095a543-8e25-4662-af3b-fa3cabdae61a-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qsbrr\" (UID: \"f095a543-8e25-4662-af3b-fa3cabdae61a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794272 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bee90beb-2799-47b4-8a1c-e8185a60e8d7-images\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794323 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-registration-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794346 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-plugins-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794378 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7edaa7aa-38fe-494a-84a9-f533d9710d1b-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-lbl9s\" (UID: \"7edaa7aa-38fe-494a-84a9-f533d9710d1b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794418 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-registration-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794436 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7edaa7aa-38fe-494a-84a9-f533d9710d1b-config\") pod \"kube-apiserver-operator-766d6c64bb-lbl9s\" (UID: \"7edaa7aa-38fe-494a-84a9-f533d9710d1b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794444 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/841455ff-8571-49ca-9aec-fb055f63bbef-plugins-dir\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:29 crc 
kubenswrapper[4861]: I0129 06:37:29.794557 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40e2a853-6297-48dc-85dd-7eccd47c2907-config\") pod \"service-ca-operator-777779d784-cwtjn\" (UID: \"40e2a853-6297-48dc-85dd-7eccd47c2907\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794636 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-tmpfs\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.794817 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bee90beb-2799-47b4-8a1c-e8185a60e8d7-auth-proxy-config\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.796773 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.816986 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.836577 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.857347 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.867876 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0071b780-153c-49f4-89c1-669953dab11b-metrics-tls\") pod \"dns-operator-744455d44c-bqw59\" (UID: \"0071b780-153c-49f4-89c1-669953dab11b\") " pod="openshift-dns-operator/dns-operator-744455d44c-bqw59" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.876625 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.897125 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.916999 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.936962 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.951634 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/efafc8de-1135-4405-bc1c-11c2efcdb99a-metrics-tls\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.967125 4861 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.973968 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/efafc8de-1135-4405-bc1c-11c2efcdb99a-trusted-ca\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.977171 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 29 06:37:29 crc kubenswrapper[4861]: I0129 06:37:29.997280 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.016506 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.036524 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.051323 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a627dba-db5e-46f8-b214-eb8a9d3e44ed-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gqnvz\" (UID: \"6a627dba-db5e-46f8-b214-eb8a9d3e44ed\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.059722 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.061783 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a627dba-db5e-46f8-b214-eb8a9d3e44ed-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gqnvz\" (UID: \"6a627dba-db5e-46f8-b214-eb8a9d3e44ed\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.075907 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.097206 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.116435 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.116491 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.116531 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.116590 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.117789 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.131145 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5319e102-d348-45b0-ab8b-1cf7c04a7c54-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-k9th8\" (UID: \"5319e102-d348-45b0-ab8b-1cf7c04a7c54\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.137462 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.140662 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5319e102-d348-45b0-ab8b-1cf7c04a7c54-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-k9th8\" (UID: \"5319e102-d348-45b0-ab8b-1cf7c04a7c54\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.156818 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.176598 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.185545 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bee90beb-2799-47b4-8a1c-e8185a60e8d7-images\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.197705 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.210565 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bee90beb-2799-47b4-8a1c-e8185a60e8d7-proxy-tls\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.237168 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.258256 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.268850 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" 
(UniqueName: \"kubernetes.io/secret/f095a543-8e25-4662-af3b-fa3cabdae61a-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qsbrr\" (UID: \"f095a543-8e25-4662-af3b-fa3cabdae61a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.277737 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.297154 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.317314 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.336662 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.357946 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.376655 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.399717 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.416716 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.437824 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.457790 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.477589 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.497194 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.517391 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.537707 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.557645 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.575591 4861 request.go:700] Waited for 1.013339316s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver-operator/configmaps?fieldSelector=metadata.name%3Dkube-apiserver-operator-config&limit=500&resourceVersion=0 Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.577849 4861 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.586592 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7edaa7aa-38fe-494a-84a9-f533d9710d1b-config\") pod \"kube-apiserver-operator-766d6c64bb-lbl9s\" (UID: \"7edaa7aa-38fe-494a-84a9-f533d9710d1b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.608847 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.617054 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.630763 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7edaa7aa-38fe-494a-84a9-f533d9710d1b-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-lbl9s\" (UID: \"7edaa7aa-38fe-494a-84a9-f533d9710d1b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.638623 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.657738 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.677187 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.697276 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.717248 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.736475 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.749509 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/666760cf-9fb4-415b-929e-14212d7cf828-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-vhjkq\" (UID: \"666760cf-9fb4-415b-929e-14212d7cf828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.757187 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.787744 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 29 06:37:30 crc kubenswrapper[4861]: E0129 06:37:30.793551 4861 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync 
Jan 29 06:37:30 crc kubenswrapper[4861]: E0129 06:37:30.793607 4861 secret.go:188] Couldn't get secret openshift-service-ca-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition
Jan 29 06:37:30 crc kubenswrapper[4861]: E0129 06:37:30.793651 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-apiservice-cert podName:2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8 nodeName:}" failed. No retries permitted until 2026-01-29 06:37:31.293619006 +0000 UTC m=+142.965113593 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-apiservice-cert") pod "packageserver-d55dfcdfc-vvwrw" (UID: "2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8") : failed to sync secret cache: timed out waiting for the condition
Jan 29 06:37:30 crc kubenswrapper[4861]: E0129 06:37:30.793704 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/40e2a853-6297-48dc-85dd-7eccd47c2907-serving-cert podName:40e2a853-6297-48dc-85dd-7eccd47c2907 nodeName:}" failed. No retries permitted until 2026-01-29 06:37:31.293680498 +0000 UTC m=+142.965175085 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/40e2a853-6297-48dc-85dd-7eccd47c2907-serving-cert") pod "service-ca-operator-777779d784-cwtjn" (UID: "40e2a853-6297-48dc-85dd-7eccd47c2907") : failed to sync secret cache: timed out waiting for the condition
Jan 29 06:37:30 crc kubenswrapper[4861]: E0129 06:37:30.794888 4861 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition
Jan 29 06:37:30 crc kubenswrapper[4861]: E0129 06:37:30.794959 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3ee9ba97-39ba-4d37-a844-16bfa7d16afc-cert podName:3ee9ba97-39ba-4d37-a844-16bfa7d16afc nodeName:}" failed. No retries permitted until 2026-01-29 06:37:31.294939012 +0000 UTC m=+142.966433609 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3ee9ba97-39ba-4d37-a844-16bfa7d16afc-cert") pod "ingress-canary-vvhvw" (UID: "3ee9ba97-39ba-4d37-a844-16bfa7d16afc") : failed to sync secret cache: timed out waiting for the condition
Jan 29 06:37:30 crc kubenswrapper[4861]: E0129 06:37:30.794985 4861 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition
Jan 29 06:37:30 crc kubenswrapper[4861]: E0129 06:37:30.795000 4861 configmap.go:193] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: failed to sync configmap cache: timed out waiting for the condition
Jan 29 06:37:30 crc kubenswrapper[4861]: E0129 06:37:30.795054 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-webhook-cert podName:2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8 nodeName:}" failed. No retries permitted until 2026-01-29 06:37:31.295035274 +0000 UTC m=+142.966529871 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-webhook-cert") pod "packageserver-d55dfcdfc-vvwrw" (UID: "2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8") : failed to sync secret cache: timed out waiting for the condition
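The `nestedpendingoperations.go:348` entries above show how the kubelet parks a failed `MountVolume.SetUp` and refuses to retry before a deadline: the first `durationBeforeRetry` is 500ms, as logged, and the delay grows on repeated failures. A stdlib-only sketch of that retry shape; the doubling factor, cap, and attempt limit here are illustrative assumptions, not kubelet's exact constants.

```go
package main

import (
	"fmt"
	"time"
)

// retryMount sketches the kubelet's per-volume retry bookkeeping: after a
// failed MountVolume.SetUp, the operation is blocked for durationBeforeRetry.
// The 500ms initial delay matches the log above; the exponential growth and
// the cap are assumptions for illustration.
func retryMount(mount func() error) error {
	const (
		initialDelay = 500 * time.Millisecond
		maxDelay     = 2 * time.Minute
		maxAttempts  = 10
	)
	delay := initialDelay
	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err = mount(); err == nil {
			return nil
		}
		fmt.Printf("attempt %d failed (%v); no retries permitted for %v\n", attempt, err, delay)
		time.Sleep(delay)
		if delay *= 2; delay > maxDelay {
			delay = maxDelay
		}
	}
	return fmt.Errorf("giving up after %d attempts: %w", maxAttempts, err)
}

func main() {
	calls := 0
	_ = retryMount(func() error {
		calls++
		if calls < 3 { // early attempts fail, e.g. the secret cache has not synced yet
			return fmt.Errorf("failed to sync secret cache: timed out waiting for the condition")
		}
		return nil
	})
}
```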
Jan 29 06:37:30 crc kubenswrapper[4861]: E0129 06:37:30.795152 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/40e2a853-6297-48dc-85dd-7eccd47c2907-config podName:40e2a853-6297-48dc-85dd-7eccd47c2907 nodeName:}" failed. No retries permitted until 2026-01-29 06:37:31.295123187 +0000 UTC m=+142.966617784 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/40e2a853-6297-48dc-85dd-7eccd47c2907-config") pod "service-ca-operator-777779d784-cwtjn" (UID: "40e2a853-6297-48dc-85dd-7eccd47c2907") : failed to sync configmap cache: timed out waiting for the condition
Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.797502 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.816722 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.836692 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.857640 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.876357 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.897630 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.917153 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.936407 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.957121 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.977155 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Jan 29 06:37:30 crc kubenswrapper[4861]: I0129 06:37:30.997036 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.016884 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.038252 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.056619 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
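Each `reflector.go:368] Caches populated` line above marks a per-namespace reflector finishing its initial LIST for one secret or configmap, which is exactly what the failed mounts were waiting on: once `packageserver-service-cert` syncs (06:37:31.157487 below), the parked `apiservice-cert`/`webhook-cert` mounts succeed. A client-go informer sketch of the same list/watch-then-sync pattern; the kubeconfig path, namespace, and resync period are assumptions for illustration, not the kubelet's internal wiring.

```go
package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the default kubeconfig; the path is an assumption.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// One namespace-scoped secret informer, analogous to the per-namespace
	// reflectors the kubelet runs for pods' secret volumes.
	factory := informers.NewSharedInformerFactoryWithOptions(
		cs, 10*time.Minute,
		informers.WithNamespace("openshift-operator-lifecycle-manager"),
	)
	inf := factory.Core().V1().Secrets().Informer()
	inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			s := obj.(*v1.Secret)
			fmt.Printf("cache populated: secret %s/%s\n", s.Namespace, s.Name)
		},
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)

	// The "Caches populated" moment: block until the initial LIST is stored.
	if !cache.WaitForCacheSync(stop, inf.HasSynced) {
		panic("failed to sync secret cache: timed out waiting for the condition")
	}
	fmt.Println("secret cache synced; volume mounts that depend on it can proceed")
}
```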
object-"openshift-service-ca-operator"/"serving-cert" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.076847 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.096894 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.117912 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.137413 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.157487 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.204559 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qpzm\" (UniqueName: \"kubernetes.io/projected/9c7d54e5-91df-4e16-ae91-a4310316c572-kube-api-access-5qpzm\") pod \"apiserver-7bbb656c7d-ffbbx\" (UID: \"9c7d54e5-91df-4e16-ae91-a4310316c572\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.224592 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp9fw\" (UniqueName: \"kubernetes.io/projected/ffe2c087-e478-43ea-89b2-af4c64778c35-kube-api-access-kp9fw\") pod \"route-controller-manager-6576b87f9c-48cc8\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.224792 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.239855 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.247987 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4966\" (UniqueName: \"kubernetes.io/projected/e5e6daea-2570-40a3-8139-045ce39e48f1-kube-api-access-b4966\") pod \"cluster-samples-operator-665b6dd947-8lw4d\" (UID: \"e5e6daea-2570-40a3-8139-045ce39e48f1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.266559 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtgm6\" (UniqueName: \"kubernetes.io/projected/c67abd0d-bb61-4bd2-a58b-42f2969e1ac1-kube-api-access-dtgm6\") pod \"machine-api-operator-5694c8668f-r2hxt\" (UID: \"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.278995 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.284911 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7vn6\" (UniqueName: \"kubernetes.io/projected/3155916f-0fd6-4e27-9ddc-d0cff45ae575-kube-api-access-n7vn6\") pod \"apiserver-76f77b778f-q6lpc\" (UID: \"3155916f-0fd6-4e27-9ddc-d0cff45ae575\") " pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.305282 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sthrp\" (UniqueName: \"kubernetes.io/projected/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-kube-api-access-sthrp\") pod \"controller-manager-879f6c89f-pzvfs\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.321743 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40e2a853-6297-48dc-85dd-7eccd47c2907-serving-cert\") pod \"service-ca-operator-777779d784-cwtjn\" (UID: \"40e2a853-6297-48dc-85dd-7eccd47c2907\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.321830 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3ee9ba97-39ba-4d37-a844-16bfa7d16afc-cert\") pod \"ingress-canary-vvhvw\" (UID: \"3ee9ba97-39ba-4d37-a844-16bfa7d16afc\") " pod="openshift-ingress-canary/ingress-canary-vvhvw" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.321943 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-webhook-cert\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.321999 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40e2a853-6297-48dc-85dd-7eccd47c2907-config\") pod \"service-ca-operator-777779d784-cwtjn\" (UID: \"40e2a853-6297-48dc-85dd-7eccd47c2907\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.322100 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-apiservice-cert\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.322542 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59ktg\" (UniqueName: \"kubernetes.io/projected/407f7505-8386-467a-9b71-e1aea70b9c3d-kube-api-access-59ktg\") pod \"oauth-openshift-558db77b4-s46nf\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.323266 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/40e2a853-6297-48dc-85dd-7eccd47c2907-config\") pod \"service-ca-operator-777779d784-cwtjn\" (UID: \"40e2a853-6297-48dc-85dd-7eccd47c2907\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.325489 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40e2a853-6297-48dc-85dd-7eccd47c2907-serving-cert\") pod \"service-ca-operator-777779d784-cwtjn\" (UID: \"40e2a853-6297-48dc-85dd-7eccd47c2907\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.326274 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-webhook-cert\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.327595 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-apiservice-cert\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.338708 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ht2kb\" (UniqueName: \"kubernetes.io/projected/a8ab995e-981b-45e3-a79e-481f09d9e9d6-kube-api-access-ht2kb\") pod \"machine-approver-56656f9798-t7qwz\" (UID: \"a8ab995e-981b-45e3-a79e-481f09d9e9d6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.357201 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.358233 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.365028 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9z77b\" (UniqueName: \"kubernetes.io/projected/ee0c951e-d5e0-40f6-8591-e6251fd23376-kube-api-access-9z77b\") pod \"openshift-apiserver-operator-796bbdcf4f-4mzns\" (UID: \"ee0c951e-d5e0-40f6-8591-e6251fd23376\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.365279 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d" Jan 29 06:37:31 crc kubenswrapper[4861]: W0129 06:37:31.384427 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda8ab995e_981b_45e3_a79e_481f09d9e9d6.slice/crio-e73b6b4114d63d7dcf3148e4520c9d72da72d0ad7d728f769239911a45134ec1 WatchSource:0}: Error finding container e73b6b4114d63d7dcf3148e4520c9d72da72d0ad7d728f769239911a45134ec1: Status 404 returned error can't find the container with id e73b6b4114d63d7dcf3148e4520c9d72da72d0ad7d728f769239911a45134ec1 Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.384910 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.397843 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.408280 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3ee9ba97-39ba-4d37-a844-16bfa7d16afc-cert\") pod \"ingress-canary-vvhvw\" (UID: \"3ee9ba97-39ba-4d37-a844-16bfa7d16afc\") " pod="openshift-ingress-canary/ingress-canary-vvhvw" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.418162 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.436845 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.456712 4861 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.476515 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.498949 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"] Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.509893 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.517679 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 29 06:37:31 crc kubenswrapper[4861]: W0129 06:37:31.526656 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c7d54e5_91df_4e16_ae91_a4310316c572.slice/crio-4d7ee3b249db9d2a230331431959b619e7fe3df9de32d98471a2c1163c58939d WatchSource:0}: Error finding container 4d7ee3b249db9d2a230331431959b619e7fe3df9de32d98471a2c1163c58939d: Status 404 returned error can't find the container with id 4d7ee3b249db9d2a230331431959b619e7fe3df9de32d98471a2c1163c58939d Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.536205 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"] Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.537146 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 29 06:37:31 crc kubenswrapper[4861]: W0129 06:37:31.556628 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffe2c087_e478_43ea_89b2_af4c64778c35.slice/crio-4d2aeb1dd5a80db18e94a150d624f95cf6a7abd39f920674c6cdd69c6b860e32 WatchSource:0}: Error finding container 4d2aeb1dd5a80db18e94a150d624f95cf6a7abd39f920674c6cdd69c6b860e32: Status 404 returned error can't find the container with id 4d2aeb1dd5a80db18e94a150d624f95cf6a7abd39f920674c6cdd69c6b860e32 Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.558047 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.572215 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.576928 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.594207 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.594958 4861 request.go:700] Waited for 1.953805223s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns/secrets?fieldSelector=metadata.name%3Ddns-default-metrics-tls&limit=500&resourceVersion=0 Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.596323 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d"] Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.596551 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.611702 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.617040 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.688304 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tczs\" (UniqueName: \"kubernetes.io/projected/580b962b-dde1-4614-9f11-e3a6cb0dc860-kube-api-access-8tczs\") pod \"openshift-controller-manager-operator-756b6f6bc6-lzhnd\" (UID: \"580b962b-dde1-4614-9f11-e3a6cb0dc860\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.688373 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gb9sv\" (UniqueName: \"kubernetes.io/projected/f2f0a0e7-e725-462f-afe4-b76db9ba2864-kube-api-access-gb9sv\") pod \"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.689833 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-q6lpc"] Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.692617 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6r4lh\" (UniqueName: \"kubernetes.io/projected/f7f7f0b2-fcab-4777-bd72-60bf1b3fede4-kube-api-access-6r4lh\") pod \"downloads-7954f5f757-jng8m\" (UID: \"f7f7f0b2-fcab-4777-bd72-60bf1b3fede4\") " pod="openshift-console/downloads-7954f5f757-jng8m" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.718377 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rncp\" (UniqueName: \"kubernetes.io/projected/0071b780-153c-49f4-89c1-669953dab11b-kube-api-access-6rncp\") pod \"dns-operator-744455d44c-bqw59\" (UID: \"0071b780-153c-49f4-89c1-669953dab11b\") " pod="openshift-dns-operator/dns-operator-744455d44c-bqw59" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.731113 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-r2hxt"] Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.735203 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-jng8m" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.741452 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9hrh\" (UniqueName: \"kubernetes.io/projected/998c663f-cba1-49d6-8685-14f84d3fa118-kube-api-access-v9hrh\") pod \"etcd-operator-b45778765-5d7b7\" (UID: \"998c663f-cba1-49d6-8685-14f84d3fa118\") " pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.754788 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbbql\" (UniqueName: \"kubernetes.io/projected/5319e102-d348-45b0-ab8b-1cf7c04a7c54-kube-api-access-bbbql\") pod \"kube-storage-version-migrator-operator-b67b599dd-k9th8\" (UID: \"5319e102-d348-45b0-ab8b-1cf7c04a7c54\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.768508 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.771062 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-md9bj\" (UniqueName: \"kubernetes.io/projected/3a5a7122-9da5-4b48-9282-c90135f1b339-kube-api-access-md9bj\") pod \"openshift-config-operator-7777fb866f-24c9r\" (UID: \"3a5a7122-9da5-4b48-9282-c90135f1b339\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.782238 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-bqw59" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.792889 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mtwj\" (UniqueName: \"kubernetes.io/projected/39b20f61-cd5e-41c6-bce9-35eaa98d85ab-kube-api-access-2mtwj\") pod \"console-operator-58897d9998-qfjmk\" (UID: \"39b20f61-cd5e-41c6-bce9-35eaa98d85ab\") " pod="openshift-console-operator/console-operator-58897d9998-qfjmk" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.810988 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f2f0a0e7-e725-462f-afe4-b76db9ba2864-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hxdzl\" (UID: \"f2f0a0e7-e725-462f-afe4-b76db9ba2864\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.829749 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.833420 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szm4q\" (UniqueName: \"kubernetes.io/projected/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-kube-api-access-szm4q\") pod \"console-f9d7485db-hrbzh\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.852028 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/efafc8de-1135-4405-bc1c-11c2efcdb99a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.876895 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pzvfs"] Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.877344 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6a627dba-db5e-46f8-b214-eb8a9d3e44ed-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gqnvz\" (UID: \"6a627dba-db5e-46f8-b214-eb8a9d3e44ed\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.897501 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns"] Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.959956 4861 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d" event={"ID":"e5e6daea-2570-40a3-8139-045ce39e48f1","Type":"ContainerStarted","Data":"3c8098ecc8bf6f093161fd580f19dd987c6ff2e8f6a552ce4de6b0ddb779525e"} Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.961058 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt" event={"ID":"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1","Type":"ContainerStarted","Data":"fc22dceb4f01eb6f15729a2da8e25fd657d1c7dc9812417568c9504607ca9286"} Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.962505 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz" event={"ID":"a8ab995e-981b-45e3-a79e-481f09d9e9d6","Type":"ContainerStarted","Data":"e73b6b4114d63d7dcf3148e4520c9d72da72d0ad7d728f769239911a45134ec1"} Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.963602 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" event={"ID":"ffe2c087-e478-43ea-89b2-af4c64778c35","Type":"ContainerStarted","Data":"4d2aeb1dd5a80db18e94a150d624f95cf6a7abd39f920674c6cdd69c6b860e32"} Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.964522 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" event={"ID":"3155916f-0fd6-4e27-9ddc-d0cff45ae575","Type":"ContainerStarted","Data":"881305deaa83b439f4f9c50e0103b0580ff612d27317b436c4a95af02cef4125"} Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.965697 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" event={"ID":"9c7d54e5-91df-4e16-ae91-a4310316c572","Type":"ContainerStarted","Data":"4d7ee3b249db9d2a230331431959b619e7fe3df9de32d98471a2c1163c58939d"} Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.972350 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.981747 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.983900 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flmzj\" (UniqueName: \"kubernetes.io/projected/3ee9ba97-39ba-4d37-a844-16bfa7d16afc-kube-api-access-flmzj\") pod \"ingress-canary-vvhvw\" (UID: \"3ee9ba97-39ba-4d37-a844-16bfa7d16afc\") " pod="openshift-ingress-canary/ingress-canary-vvhvw" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.987522 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glccd\" (UniqueName: \"kubernetes.io/projected/952eba6c-ad61-4a8e-9cae-77e3285fed37-kube-api-access-glccd\") pod \"machine-config-controller-84d6567774-fmh7v\" (UID: \"952eba6c-ad61-4a8e-9cae-77e3285fed37\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.987715 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gwjc\" (UniqueName: \"kubernetes.io/projected/efafc8de-1135-4405-bc1c-11c2efcdb99a-kube-api-access-2gwjc\") pod \"ingress-operator-5b745b69d9-xqwts\" (UID: \"efafc8de-1135-4405-bc1c-11c2efcdb99a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.989540 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pp6l\" (UniqueName: \"kubernetes.io/projected/913a3527-6d79-4441-8895-a9212004f20b-kube-api-access-5pp6l\") pod \"authentication-operator-69f744f599-rxbpq\" (UID: \"913a3527-6d79-4441-8895-a9212004f20b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.990697 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9xdh\" (UniqueName: \"kubernetes.io/projected/80eb150e-f53d-41ef-8338-41643b573ad1-kube-api-access-q9xdh\") pod \"migrator-59844c95c7-hvv84\" (UID: \"80eb150e-f53d-41ef-8338-41643b573ad1\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84" Jan 29 06:37:31 crc kubenswrapper[4861]: I0129 06:37:31.992498 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25d0880d-c427-464c-bd37-cda67e6502da-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-vjvgq\" (UID: \"25d0880d-c427-464c-bd37-cda67e6502da\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.021238 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29njp\" (UniqueName: \"kubernetes.io/projected/40e2a853-6297-48dc-85dd-7eccd47c2907-kube-api-access-29njp\") pod \"service-ca-operator-777779d784-cwtjn\" (UID: \"40e2a853-6297-48dc-85dd-7eccd47c2907\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.024290 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-qfjmk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.030314 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whbzg\" (UniqueName: \"kubernetes.io/projected/666760cf-9fb4-415b-929e-14212d7cf828-kube-api-access-whbzg\") pod \"control-plane-machine-set-operator-78cbb6b69f-vhjkq\" (UID: \"666760cf-9fb4-415b-929e-14212d7cf828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.042732 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.048268 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" Jan 29 06:37:32 crc kubenswrapper[4861]: W0129 06:37:32.048877 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcdeb1c43_11fd_4a37_b31d_f4c1e7600e36.slice/crio-f6b4456cdf8672d96ac8eb836920b2eb1b8d5af0c356e3cd7d7aacddb2529336 WatchSource:0}: Error finding container f6b4456cdf8672d96ac8eb836920b2eb1b8d5af0c356e3cd7d7aacddb2529336: Status 404 returned error can't find the container with id f6b4456cdf8672d96ac8eb836920b2eb1b8d5af0c356e3cd7d7aacddb2529336 Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.055568 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.056773 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7edaa7aa-38fe-494a-84a9-f533d9710d1b-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-lbl9s\" (UID: \"7edaa7aa-38fe-494a-84a9-f533d9710d1b\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.063184 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.076194 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.088948 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.089639 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbd2b\" (UniqueName: \"kubernetes.io/projected/f095a543-8e25-4662-af3b-fa3cabdae61a-kube-api-access-fbd2b\") pod \"multus-admission-controller-857f4d67dd-qsbrr\" (UID: \"f095a543-8e25-4662-af3b-fa3cabdae61a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.097263 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npds5\" (UniqueName: \"kubernetes.io/projected/841455ff-8571-49ca-9aec-fb055f63bbef-kube-api-access-npds5\") pod \"csi-hostpathplugin-x82xx\" (UID: \"841455ff-8571-49ca-9aec-fb055f63bbef\") " pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.106252 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-s46nf"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.120572 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qc7vq\" (UniqueName: \"kubernetes.io/projected/2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8-kube-api-access-qc7vq\") pod \"packageserver-d55dfcdfc-vvwrw\" (UID: \"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.133653 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.139593 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq4dq\" (UniqueName: \"kubernetes.io/projected/bee90beb-2799-47b4-8a1c-e8185a60e8d7-kube-api-access-wq4dq\") pod \"machine-config-operator-74547568cd-fnrr2\" (UID: \"bee90beb-2799-47b4-8a1c-e8185a60e8d7\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.140811 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.146165 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.157026 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.158912 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.162250 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-bqw59"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.173656 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.176931 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.184149 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-jng8m"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.186594 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.196020 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.215997 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.223013 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-5d7b7"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.232821 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.238295 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.243360 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.244352 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-vvhvw" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.285555 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-x82xx" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.346672 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7tpcj\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") " pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.346888 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7tpcj\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") " pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.346910 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-bound-sa-token\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.346944 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3be4630d-1e62-4c4d-979b-1e8bc839263a-profile-collector-cert\") pod \"catalog-operator-68c6474976-fsvsh\" (UID: \"3be4630d-1e62-4c4d-979b-1e8bc839263a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.346970 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2540143e-271c-4f8b-9c7c-1032c7ddb2eb-profile-collector-cert\") pod \"olm-operator-6b444d44fb-v4shf\" (UID: \"2540143e-271c-4f8b-9c7c-1032c7ddb2eb\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.346986 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cead6a1e-5df8-4937-9ee3-71efe555615f-config-volume\") pod \"collect-profiles-29494470-zsdk4\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347003 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zlhd\" (UniqueName: \"kubernetes.io/projected/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-kube-api-access-5zlhd\") pod \"marketplace-operator-79b997595-7tpcj\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") " pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347023 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxmsm\" (UniqueName: \"kubernetes.io/projected/4f0d3cda-4ef5-47ea-8138-881b738c1088-kube-api-access-bxmsm\") pod \"service-ca-9c57cc56f-f9vlr\" (UID: 
\"4f0d3cda-4ef5-47ea-8138-881b738c1088\") " pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347041 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9f4vz\" (UniqueName: \"kubernetes.io/projected/8f74e6b3-bf32-4188-9246-3e164a0857ca-kube-api-access-9f4vz\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347055 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3774c246-b396-48e5-bfbc-075fe535b932-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-gjhzk\" (UID: \"3774c246-b396-48e5-bfbc-075fe535b932\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347083 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9qpw\" (UniqueName: \"kubernetes.io/projected/2540143e-271c-4f8b-9c7c-1032c7ddb2eb-kube-api-access-j9qpw\") pod \"olm-operator-6b444d44fb-v4shf\" (UID: \"2540143e-271c-4f8b-9c7c-1032c7ddb2eb\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347109 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/4f0d3cda-4ef5-47ea-8138-881b738c1088-signing-cabundle\") pod \"service-ca-9c57cc56f-f9vlr\" (UID: \"4f0d3cda-4ef5-47ea-8138-881b738c1088\") " pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347124 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f74e6b3-bf32-4188-9246-3e164a0857ca-service-ca-bundle\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347150 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/8f74e6b3-bf32-4188-9246-3e164a0857ca-stats-auth\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347170 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/4f0d3cda-4ef5-47ea-8138-881b738c1088-signing-key\") pod \"service-ca-9c57cc56f-f9vlr\" (UID: \"4f0d3cda-4ef5-47ea-8138-881b738c1088\") " pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347187 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-trusted-ca\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347204 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3be4630d-1e62-4c4d-979b-1e8bc839263a-srv-cert\") pod \"catalog-operator-68c6474976-fsvsh\" (UID: \"3be4630d-1e62-4c4d-979b-1e8bc839263a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347235 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c66ab458-ce20-4c27-99d5-e328b6397bd4-ca-trust-extracted\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347249 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/8f74e6b3-bf32-4188-9246-3e164a0857ca-default-certificate\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347318 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c66ab458-ce20-4c27-99d5-e328b6397bd4-installation-pull-secrets\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347368 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347398 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f74e6b3-bf32-4188-9246-3e164a0857ca-metrics-certs\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347413 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6m7z\" (UniqueName: \"kubernetes.io/projected/3774c246-b396-48e5-bfbc-075fe535b932-kube-api-access-x6m7z\") pod \"package-server-manager-789f6589d5-gjhzk\" (UID: \"3774c246-b396-48e5-bfbc-075fe535b932\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347427 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2540143e-271c-4f8b-9c7c-1032c7ddb2eb-srv-cert\") pod \"olm-operator-6b444d44fb-v4shf\" (UID: \"2540143e-271c-4f8b-9c7c-1032c7ddb2eb\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347460 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cead6a1e-5df8-4937-9ee3-71efe555615f-secret-volume\") pod \"collect-profiles-29494470-zsdk4\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347475 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt68k\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-kube-api-access-qt68k\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347659 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-tls\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347673 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl96w\" (UniqueName: \"kubernetes.io/projected/cead6a1e-5df8-4937-9ee3-71efe555615f-kube-api-access-rl96w\") pod \"collect-profiles-29494470-zsdk4\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347688 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-certificates\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.347739 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zq8d\" (UniqueName: \"kubernetes.io/projected/3be4630d-1e62-4c4d-979b-1e8bc839263a-kube-api-access-4zq8d\") pod \"catalog-operator-68c6474976-fsvsh\" (UID: \"3be4630d-1e62-4c4d-979b-1e8bc839263a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:32 crc kubenswrapper[4861]: E0129 06:37:32.349820 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:32.849801744 +0000 UTC m=+144.521296301 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.438707 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.448962 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449451 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a994828d-b493-4557-b2fc-d6ac66948306-config-volume\") pod \"dns-default-wxx66\" (UID: \"a994828d-b493-4557-b2fc-d6ac66948306\") " pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449479 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8srd\" (UniqueName: \"kubernetes.io/projected/a994828d-b493-4557-b2fc-d6ac66948306-kube-api-access-d8srd\") pod \"dns-default-wxx66\" (UID: \"a994828d-b493-4557-b2fc-d6ac66948306\") " pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449500 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f74e6b3-bf32-4188-9246-3e164a0857ca-metrics-certs\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449514 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a994828d-b493-4557-b2fc-d6ac66948306-metrics-tls\") pod \"dns-default-wxx66\" (UID: \"a994828d-b493-4557-b2fc-d6ac66948306\") " pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449538 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6m7z\" (UniqueName: \"kubernetes.io/projected/3774c246-b396-48e5-bfbc-075fe535b932-kube-api-access-x6m7z\") pod \"package-server-manager-789f6589d5-gjhzk\" (UID: \"3774c246-b396-48e5-bfbc-075fe535b932\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449553 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2540143e-271c-4f8b-9c7c-1032c7ddb2eb-srv-cert\") pod \"olm-operator-6b444d44fb-v4shf\" (UID: \"2540143e-271c-4f8b-9c7c-1032c7ddb2eb\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 
06:37:32.449587 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cead6a1e-5df8-4937-9ee3-71efe555615f-secret-volume\") pod \"collect-profiles-29494470-zsdk4\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449606 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt68k\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-kube-api-access-qt68k\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449622 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5d58e215-ddae-413b-819a-27740f79d427-certs\") pod \"machine-config-server-2lxpk\" (UID: \"5d58e215-ddae-413b-819a-27740f79d427\") " pod="openshift-machine-config-operator/machine-config-server-2lxpk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449652 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-tls\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449666 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl96w\" (UniqueName: \"kubernetes.io/projected/cead6a1e-5df8-4937-9ee3-71efe555615f-kube-api-access-rl96w\") pod \"collect-profiles-29494470-zsdk4\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449692 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-certificates\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449804 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zq8d\" (UniqueName: \"kubernetes.io/projected/3be4630d-1e62-4c4d-979b-1e8bc839263a-kube-api-access-4zq8d\") pod \"catalog-operator-68c6474976-fsvsh\" (UID: \"3be4630d-1e62-4c4d-979b-1e8bc839263a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449857 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7tpcj\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") " pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449874 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7tpcj\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") " pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449905 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-bound-sa-token\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449937 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3be4630d-1e62-4c4d-979b-1e8bc839263a-profile-collector-cert\") pod \"catalog-operator-68c6474976-fsvsh\" (UID: \"3be4630d-1e62-4c4d-979b-1e8bc839263a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449967 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2540143e-271c-4f8b-9c7c-1032c7ddb2eb-profile-collector-cert\") pod \"olm-operator-6b444d44fb-v4shf\" (UID: \"2540143e-271c-4f8b-9c7c-1032c7ddb2eb\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.449991 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cead6a1e-5df8-4937-9ee3-71efe555615f-config-volume\") pod \"collect-profiles-29494470-zsdk4\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450041 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zlhd\" (UniqueName: \"kubernetes.io/projected/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-kube-api-access-5zlhd\") pod \"marketplace-operator-79b997595-7tpcj\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") " pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450061 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxmsm\" (UniqueName: \"kubernetes.io/projected/4f0d3cda-4ef5-47ea-8138-881b738c1088-kube-api-access-bxmsm\") pod \"service-ca-9c57cc56f-f9vlr\" (UID: \"4f0d3cda-4ef5-47ea-8138-881b738c1088\") " pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450092 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9f4vz\" (UniqueName: \"kubernetes.io/projected/8f74e6b3-bf32-4188-9246-3e164a0857ca-kube-api-access-9f4vz\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450108 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3774c246-b396-48e5-bfbc-075fe535b932-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-gjhzk\" 
(UID: \"3774c246-b396-48e5-bfbc-075fe535b932\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450136 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvkb7\" (UniqueName: \"kubernetes.io/projected/5d58e215-ddae-413b-819a-27740f79d427-kube-api-access-vvkb7\") pod \"machine-config-server-2lxpk\" (UID: \"5d58e215-ddae-413b-819a-27740f79d427\") " pod="openshift-machine-config-operator/machine-config-server-2lxpk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450161 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9qpw\" (UniqueName: \"kubernetes.io/projected/2540143e-271c-4f8b-9c7c-1032c7ddb2eb-kube-api-access-j9qpw\") pod \"olm-operator-6b444d44fb-v4shf\" (UID: \"2540143e-271c-4f8b-9c7c-1032c7ddb2eb\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450185 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/4f0d3cda-4ef5-47ea-8138-881b738c1088-signing-cabundle\") pod \"service-ca-9c57cc56f-f9vlr\" (UID: \"4f0d3cda-4ef5-47ea-8138-881b738c1088\") " pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450212 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f74e6b3-bf32-4188-9246-3e164a0857ca-service-ca-bundle\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450245 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/8f74e6b3-bf32-4188-9246-3e164a0857ca-stats-auth\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450276 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/4f0d3cda-4ef5-47ea-8138-881b738c1088-signing-key\") pod \"service-ca-9c57cc56f-f9vlr\" (UID: \"4f0d3cda-4ef5-47ea-8138-881b738c1088\") " pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450300 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-trusted-ca\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450316 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3be4630d-1e62-4c4d-979b-1e8bc839263a-srv-cert\") pod \"catalog-operator-68c6474976-fsvsh\" (UID: \"3be4630d-1e62-4c4d-979b-1e8bc839263a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450334 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"default-certificate\" (UniqueName: \"kubernetes.io/secret/8f74e6b3-bf32-4188-9246-3e164a0857ca-default-certificate\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450349 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c66ab458-ce20-4c27-99d5-e328b6397bd4-ca-trust-extracted\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450424 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c66ab458-ce20-4c27-99d5-e328b6397bd4-installation-pull-secrets\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.450441 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5d58e215-ddae-413b-819a-27740f79d427-node-bootstrap-token\") pod \"machine-config-server-2lxpk\" (UID: \"5d58e215-ddae-413b-819a-27740f79d427\") " pod="openshift-machine-config-operator/machine-config-server-2lxpk" Jan 29 06:37:32 crc kubenswrapper[4861]: E0129 06:37:32.450909 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:32.950894718 +0000 UTC m=+144.622389275 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.455816 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cead6a1e-5df8-4937-9ee3-71efe555615f-config-volume\") pod \"collect-profiles-29494470-zsdk4\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.456511 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-trusted-ca\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.458408 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f74e6b3-bf32-4188-9246-3e164a0857ca-service-ca-bundle\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.462388 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-certificates\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.463231 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c66ab458-ce20-4c27-99d5-e328b6397bd4-ca-trust-extracted\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.463946 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7tpcj\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") " pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.465192 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3774c246-b396-48e5-bfbc-075fe535b932-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-gjhzk\" (UID: \"3774c246-b396-48e5-bfbc-075fe535b932\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.468289 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/3be4630d-1e62-4c4d-979b-1e8bc839263a-profile-collector-cert\") pod \"catalog-operator-68c6474976-fsvsh\" (UID: \"3be4630d-1e62-4c4d-979b-1e8bc839263a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.468600 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/4f0d3cda-4ef5-47ea-8138-881b738c1088-signing-cabundle\") pod \"service-ca-9c57cc56f-f9vlr\" (UID: \"4f0d3cda-4ef5-47ea-8138-881b738c1088\") " pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.469119 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7tpcj\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") " pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.472053 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-24c9r"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.472088 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2540143e-271c-4f8b-9c7c-1032c7ddb2eb-profile-collector-cert\") pod \"olm-operator-6b444d44fb-v4shf\" (UID: \"2540143e-271c-4f8b-9c7c-1032c7ddb2eb\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.472221 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c66ab458-ce20-4c27-99d5-e328b6397bd4-installation-pull-secrets\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.474857 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/8f74e6b3-bf32-4188-9246-3e164a0857ca-stats-auth\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.474977 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f74e6b3-bf32-4188-9246-3e164a0857ca-metrics-certs\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.475445 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2540143e-271c-4f8b-9c7c-1032c7ddb2eb-srv-cert\") pod \"olm-operator-6b444d44fb-v4shf\" (UID: \"2540143e-271c-4f8b-9c7c-1032c7ddb2eb\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.475801 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/8f74e6b3-bf32-4188-9246-3e164a0857ca-default-certificate\") pod 
\"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.476824 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cead6a1e-5df8-4937-9ee3-71efe555615f-secret-volume\") pod \"collect-profiles-29494470-zsdk4\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.476996 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-tls\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.485836 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3be4630d-1e62-4c4d-979b-1e8bc839263a-srv-cert\") pod \"catalog-operator-68c6474976-fsvsh\" (UID: \"3be4630d-1e62-4c4d-979b-1e8bc839263a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.493484 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/4f0d3cda-4ef5-47ea-8138-881b738c1088-signing-key\") pod \"service-ca-9c57cc56f-f9vlr\" (UID: \"4f0d3cda-4ef5-47ea-8138-881b738c1088\") " pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.504765 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.511928 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zlhd\" (UniqueName: \"kubernetes.io/projected/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-kube-api-access-5zlhd\") pod \"marketplace-operator-79b997595-7tpcj\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") " pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.524736 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.532002 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxmsm\" (UniqueName: \"kubernetes.io/projected/4f0d3cda-4ef5-47ea-8138-881b738c1088-kube-api-access-bxmsm\") pod \"service-ca-9c57cc56f-f9vlr\" (UID: \"4f0d3cda-4ef5-47ea-8138-881b738c1088\") " pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.532996 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9f4vz\" (UniqueName: \"kubernetes.io/projected/8f74e6b3-bf32-4188-9246-3e164a0857ca-kube-api-access-9f4vz\") pod \"router-default-5444994796-9qj5w\" (UID: \"8f74e6b3-bf32-4188-9246-3e164a0857ca\") " pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.553621 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5d58e215-ddae-413b-819a-27740f79d427-node-bootstrap-token\") pod \"machine-config-server-2lxpk\" (UID: \"5d58e215-ddae-413b-819a-27740f79d427\") " pod="openshift-machine-config-operator/machine-config-server-2lxpk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.553980 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.554008 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a994828d-b493-4557-b2fc-d6ac66948306-config-volume\") pod \"dns-default-wxx66\" (UID: \"a994828d-b493-4557-b2fc-d6ac66948306\") " pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.554025 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8srd\" (UniqueName: \"kubernetes.io/projected/a994828d-b493-4557-b2fc-d6ac66948306-kube-api-access-d8srd\") pod \"dns-default-wxx66\" (UID: \"a994828d-b493-4557-b2fc-d6ac66948306\") " pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.554039 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a994828d-b493-4557-b2fc-d6ac66948306-metrics-tls\") pod \"dns-default-wxx66\" (UID: \"a994828d-b493-4557-b2fc-d6ac66948306\") " pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.554081 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5d58e215-ddae-413b-819a-27740f79d427-certs\") pod \"machine-config-server-2lxpk\" (UID: \"5d58e215-ddae-413b-819a-27740f79d427\") " pod="openshift-machine-config-operator/machine-config-server-2lxpk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.554157 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvkb7\" (UniqueName: \"kubernetes.io/projected/5d58e215-ddae-413b-819a-27740f79d427-kube-api-access-vvkb7\") pod \"machine-config-server-2lxpk\" (UID: \"5d58e215-ddae-413b-819a-27740f79d427\") " pod="openshift-machine-config-operator/machine-config-server-2lxpk" Jan 29 06:37:32 crc kubenswrapper[4861]: E0129 06:37:32.554636 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.054618473 +0000 UTC m=+144.726113040 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.557956 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a994828d-b493-4557-b2fc-d6ac66948306-config-volume\") pod \"dns-default-wxx66\" (UID: \"a994828d-b493-4557-b2fc-d6ac66948306\") " pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.567841 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6m7z\" (UniqueName: \"kubernetes.io/projected/3774c246-b396-48e5-bfbc-075fe535b932-kube-api-access-x6m7z\") pod \"package-server-manager-789f6589d5-gjhzk\" (UID: \"3774c246-b396-48e5-bfbc-075fe535b932\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.569931 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-qfjmk"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.571805 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5d58e215-ddae-413b-819a-27740f79d427-node-bootstrap-token\") pod \"machine-config-server-2lxpk\" (UID: \"5d58e215-ddae-413b-819a-27740f79d427\") " pod="openshift-machine-config-operator/machine-config-server-2lxpk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.579754 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a994828d-b493-4557-b2fc-d6ac66948306-metrics-tls\") pod \"dns-default-wxx66\" (UID: \"a994828d-b493-4557-b2fc-d6ac66948306\") " pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.580720 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-hrbzh"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.589566 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5d58e215-ddae-413b-819a-27740f79d427-certs\") pod \"machine-config-server-2lxpk\" (UID: \"5d58e215-ddae-413b-819a-27740f79d427\") " pod="openshift-machine-config-operator/machine-config-server-2lxpk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.591718 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9qpw\" (UniqueName: \"kubernetes.io/projected/2540143e-271c-4f8b-9c7c-1032c7ddb2eb-kube-api-access-j9qpw\") pod \"olm-operator-6b444d44fb-v4shf\" (UID: \"2540143e-271c-4f8b-9c7c-1032c7ddb2eb\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.601858 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zq8d\" (UniqueName: \"kubernetes.io/projected/3be4630d-1e62-4c4d-979b-1e8bc839263a-kube-api-access-4zq8d\") pod \"catalog-operator-68c6474976-fsvsh\" (UID: \"3be4630d-1e62-4c4d-979b-1e8bc839263a\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.613298 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt68k\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-kube-api-access-qt68k\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.635681 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-bound-sa-token\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.654513 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl96w\" (UniqueName: \"kubernetes.io/projected/cead6a1e-5df8-4937-9ee3-71efe555615f-kube-api-access-rl96w\") pod \"collect-profiles-29494470-zsdk4\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.659353 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:32 crc kubenswrapper[4861]: E0129 06:37:32.659742 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.159725914 +0000 UTC m=+144.831220471 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.707060 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvkb7\" (UniqueName: \"kubernetes.io/projected/5d58e215-ddae-413b-819a-27740f79d427-kube-api-access-vvkb7\") pod \"machine-config-server-2lxpk\" (UID: \"5d58e215-ddae-413b-819a-27740f79d427\") " pod="openshift-machine-config-operator/machine-config-server-2lxpk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.719665 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.731986 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-rxbpq"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.751016 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8srd\" (UniqueName: \"kubernetes.io/projected/a994828d-b493-4557-b2fc-d6ac66948306-kube-api-access-d8srd\") pod \"dns-default-wxx66\" (UID: \"a994828d-b493-4557-b2fc-d6ac66948306\") " pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.760415 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:32 crc kubenswrapper[4861]: E0129 06:37:32.760778 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.260766117 +0000 UTC m=+144.932260674 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.765546 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.775953 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.785366 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v"] Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.786246 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.800693 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.804930 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.811157 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.819093 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.826393 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.861770 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:32 crc kubenswrapper[4861]: E0129 06:37:32.862061 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.362046446 +0000 UTC m=+145.033541003 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.890562 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-2lxpk" Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.893881 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:32 crc kubenswrapper[4861]: W0129 06:37:32.938525 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf2f0a0e7_e725_462f_afe4_b76db9ba2864.slice/crio-30369068883be850521a7f15812043b281125ef05f58e464d9c5b6c514f029da WatchSource:0}: Error finding container 30369068883be850521a7f15812043b281125ef05f58e464d9c5b6c514f029da: Status 404 returned error can't find the container with id 30369068883be850521a7f15812043b281125ef05f58e464d9c5b6c514f029da Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.949422 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts"] Jan 29 06:37:32 crc kubenswrapper[4861]: W0129 06:37:32.962507 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod913a3527_6d79_4441_8895_a9212004f20b.slice/crio-74d49eebc5e826a34f162aeffb632cd49b23e24203b0e7dcca4a651f538dc0a5 WatchSource:0}: Error finding container 74d49eebc5e826a34f162aeffb632cd49b23e24203b0e7dcca4a651f538dc0a5: Status 404 returned error can't find the container with id 74d49eebc5e826a34f162aeffb632cd49b23e24203b0e7dcca4a651f538dc0a5 Jan 29 06:37:32 crc kubenswrapper[4861]: E0129 06:37:32.964768 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.464751493 +0000 UTC m=+145.136246050 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:32 crc kubenswrapper[4861]: I0129 06:37:32.964219 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.012923 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d" event={"ID":"e5e6daea-2570-40a3-8139-045ce39e48f1","Type":"ContainerStarted","Data":"3300ddf8143375816a8349c1554fcfcb4fa20627397cf2461fe869f497a81bf3"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.012966 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d" event={"ID":"e5e6daea-2570-40a3-8139-045ce39e48f1","Type":"ContainerStarted","Data":"7409857dc24f055e60654fe8c3b6f9db16bdfae823bc4c1b5f778c49d39d7d46"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.021772 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-9qj5w" 
event={"ID":"8f74e6b3-bf32-4188-9246-3e164a0857ca","Type":"ContainerStarted","Data":"7f37b010870a18b3538ca0c4deb99576a98ca72622a6c2a733c5fafcb500e838"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.025173 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq" event={"ID":"25d0880d-c427-464c-bd37-cda67e6502da","Type":"ContainerStarted","Data":"150e99c7b8fed834b32766ff4dc07d8aeecfc02420bd27d9fd54cab736fb5efb"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.026475 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.034023 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qsbrr"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.060673 4861 generic.go:334] "Generic (PLEG): container finished" podID="9c7d54e5-91df-4e16-ae91-a4310316c572" containerID="ecf1b5c1ae9749804b0b93fe0ca2854222e798ebc0da6113ccaab07afa8e7342" exitCode=0 Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.060764 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" event={"ID":"9c7d54e5-91df-4e16-ae91-a4310316c572","Type":"ContainerDied","Data":"ecf1b5c1ae9749804b0b93fe0ca2854222e798ebc0da6113ccaab07afa8e7342"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.067939 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:33 crc kubenswrapper[4861]: E0129 06:37:33.068149 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.568120689 +0000 UTC m=+145.239615236 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.068252 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:33 crc kubenswrapper[4861]: E0129 06:37:33.068715 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.568706954 +0000 UTC m=+145.240201511 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.069313 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" event={"ID":"3a5a7122-9da5-4b48-9282-c90135f1b339","Type":"ContainerStarted","Data":"6ee4865c13048576020a7cb135f6f0b192daa5f7477aab83db15e689ab50c1a7"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.080686 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" event={"ID":"913a3527-6d79-4441-8895-a9212004f20b","Type":"ContainerStarted","Data":"74d49eebc5e826a34f162aeffb632cd49b23e24203b0e7dcca4a651f538dc0a5"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.096388 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz" event={"ID":"a8ab995e-981b-45e3-a79e-481f09d9e9d6","Type":"ContainerStarted","Data":"d08189575d78f7114fb0bdad7f541953a59e17a0d23ec0aef9b5c6c178de967f"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.096440 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz" event={"ID":"a8ab995e-981b-45e3-a79e-481f09d9e9d6","Type":"ContainerStarted","Data":"18b15903cba43b4daa655fdba003186b2b18e3c71988ca16056ebc42d4daf6d9"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.110252 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" event={"ID":"ffe2c087-e478-43ea-89b2-af4c64778c35","Type":"ContainerStarted","Data":"72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.111118 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.130151 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v" event={"ID":"952eba6c-ad61-4a8e-9cae-77e3285fed37","Type":"ContainerStarted","Data":"9ce8fde47c1882f502e7fe81d55fda8f46f5cb163b9bdfe9372dfd85c5e7feab"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.130676 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.131124 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8" event={"ID":"5319e102-d348-45b0-ab8b-1cf7c04a7c54","Type":"ContainerStarted","Data":"7ea193821cfa4c643e873c29aa1aff9be63f1413c8f06622555e6a6380c7646e"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.135691 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns" event={"ID":"ee0c951e-d5e0-40f6-8591-e6251fd23376","Type":"ContainerStarted","Data":"7932969df1d0f4ffde3b6390025db90dc06263943aa68a5a4e85ea6db1682cb7"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.135724 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns" event={"ID":"ee0c951e-d5e0-40f6-8591-e6251fd23376","Type":"ContainerStarted","Data":"1949a48289d98f0b0b83cc334f6efaeb151c93ef0792ee641e4247d40db38af7"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.142110 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt" event={"ID":"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1","Type":"ContainerStarted","Data":"fd4a7bce7c00f9f92eb15d532506990346e948a5747e3418b359f27bad251636"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.142361 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt" event={"ID":"c67abd0d-bb61-4bd2-a58b-42f2969e1ac1","Type":"ContainerStarted","Data":"4878708dc57ba8ccff20939921c221fd350b266c4bf293b27d3f9652c0ffcef3"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.143346 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" event={"ID":"f2f0a0e7-e725-462f-afe4-b76db9ba2864","Type":"ContainerStarted","Data":"30369068883be850521a7f15812043b281125ef05f58e464d9c5b6c514f029da"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.153338 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-bqw59" event={"ID":"0071b780-153c-49f4-89c1-669953dab11b","Type":"ContainerStarted","Data":"651434944d4e0ee121b032d49e51a1d3587c5eb3ae1c37a4a4afbee5b864e1b1"} Jan 29 06:37:33 crc kubenswrapper[4861]: W0129 06:37:33.154962 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf095a543_8e25_4662_af3b_fa3cabdae61a.slice/crio-ad7714968629ec2b297ef7f340d3b0c8a0ffd6b4967508e64121999c1836a384 WatchSource:0}: Error finding container ad7714968629ec2b297ef7f340d3b0c8a0ffd6b4967508e64121999c1836a384: Status 404 returned error can't find the container with id ad7714968629ec2b297ef7f340d3b0c8a0ffd6b4967508e64121999c1836a384 Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.161537 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-qfjmk" event={"ID":"39b20f61-cd5e-41c6-bce9-35eaa98d85ab","Type":"ContainerStarted","Data":"c40cce8dd9cf739627c9d885f0aae24b1661895494ba146ffc1c533f8682adea"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.175955 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-jng8m" event={"ID":"f7f7f0b2-fcab-4777-bd72-60bf1b3fede4","Type":"ContainerStarted","Data":"7bf5074cc887fdf1ed669710272cc06d6a9c2a0383f1f6c2b7eba6a7bb0a79e8"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.176679 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:33 crc 
kubenswrapper[4861]: E0129 06:37:33.176999 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.676980461 +0000 UTC m=+145.348475018 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.177157 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:33 crc kubenswrapper[4861]: E0129 06:37:33.178160 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.678146382 +0000 UTC m=+145.349640929 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.204669 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" event={"ID":"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36","Type":"ContainerStarted","Data":"1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.204709 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" event={"ID":"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36","Type":"ContainerStarted","Data":"f6b4456cdf8672d96ac8eb836920b2eb1b8d5af0c356e3cd7d7aacddb2529336"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.205677 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.209582 4861 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-pzvfs container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.209667 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" 
podUID="cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.217819 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" event={"ID":"407f7505-8386-467a-9b71-e1aea70b9c3d","Type":"ContainerStarted","Data":"49eb4def00759e3ed82b1f5bd83119cf9b792cddccf7241eb97ca90e3759788f"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.221149 4861 generic.go:334] "Generic (PLEG): container finished" podID="3155916f-0fd6-4e27-9ddc-d0cff45ae575" containerID="7efea89c97ee0ee6dfb7bbb9fa6a62ba74bf5b5bce5132e1b6439d32b83f9a63" exitCode=0 Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.221213 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" event={"ID":"3155916f-0fd6-4e27-9ddc-d0cff45ae575","Type":"ContainerDied","Data":"7efea89c97ee0ee6dfb7bbb9fa6a62ba74bf5b5bce5132e1b6439d32b83f9a63"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.227474 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-hrbzh" event={"ID":"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed","Type":"ContainerStarted","Data":"9f1f982f29279d544b208e1a85f5874ae7c8e34d4967fd433cc90e573b7dfeec"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.229664 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.229977 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" event={"ID":"580b962b-dde1-4614-9f11-e3a6cb0dc860","Type":"ContainerStarted","Data":"cd71534c73abb57bd2254443262e2fb32ce80fa5863d2e7f7d4ca7dc0c446f07"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.247022 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" event={"ID":"998c663f-cba1-49d6-8685-14f84d3fa118","Type":"ContainerStarted","Data":"d62d003cea3f6d5bfba4ab6ca910e7554e71e40380d48b73b1c586af7746d338"} Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.279401 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:33 crc kubenswrapper[4861]: E0129 06:37:33.280390 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.780373855 +0000 UTC m=+145.451868412 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.294577 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.298081 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.310655 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.313201 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-x82xx"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.363564 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.381020 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:33 crc kubenswrapper[4861]: E0129 06:37:33.383449 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.883434412 +0000 UTC m=+145.554928969 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: W0129 06:37:33.384168 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod666760cf_9fb4_415b_929e_14212d7cf828.slice/crio-a8892db40697c5dd48fa847a5007841b57cc556cd8559d436cb42eee075ff859 WatchSource:0}: Error finding container a8892db40697c5dd48fa847a5007841b57cc556cd8559d436cb42eee075ff859: Status 404 returned error can't find the container with id a8892db40697c5dd48fa847a5007841b57cc556cd8559d436cb42eee075ff859 Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.402475 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-vvhvw"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.475927 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.481996 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:33 crc kubenswrapper[4861]: E0129 06:37:33.482165 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.982142002 +0000 UTC m=+145.653636559 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.482304 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:33 crc kubenswrapper[4861]: E0129 06:37:33.482578 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:33.982564934 +0000 UTC m=+145.654059491 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.531645 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-wxx66"] Jan 29 06:37:33 crc kubenswrapper[4861]: W0129 06:37:33.574831 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3ee9ba97_39ba_4d37_a844_16bfa7d16afc.slice/crio-95a51b0c8aa82786ec6c109ed2d2a7da17174fa6eac1f2e5965af55b8e801565 WatchSource:0}: Error finding container 95a51b0c8aa82786ec6c109ed2d2a7da17174fa6eac1f2e5965af55b8e801565: Status 404 returned error can't find the container with id 95a51b0c8aa82786ec6c109ed2d2a7da17174fa6eac1f2e5965af55b8e801565 Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.585091 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:33 crc kubenswrapper[4861]: E0129 06:37:33.586325 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:34.086226457 +0000 UTC m=+145.757721024 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: W0129 06:37:33.589455 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda994828d_b493_4557_b2fc_d6ac66948306.slice/crio-a96be0565c9b7df0978391a82328201f4f5d9e4fc38cf52dc1fa6889295b577f WatchSource:0}: Error finding container a96be0565c9b7df0978391a82328201f4f5d9e4fc38cf52dc1fa6889295b577f: Status 404 returned error can't find the container with id a96be0565c9b7df0978391a82328201f4f5d9e4fc38cf52dc1fa6889295b577f Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.641345 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.646125 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.649825 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.666339 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.694725 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:33 crc kubenswrapper[4861]: E0129 06:37:33.694976 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:34.194965255 +0000 UTC m=+145.866459802 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.713442 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7tpcj"] Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.724793 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-f9vlr"] Jan 29 06:37:33 crc kubenswrapper[4861]: W0129 06:37:33.729588 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3774c246_b396_48e5_bfbc_075fe535b932.slice/crio-7b35180a7ed4e9f6a81dbfbd9ea49e8b9bc924644012049afd11ac9226ede0b0 WatchSource:0}: Error finding container 7b35180a7ed4e9f6a81dbfbd9ea49e8b9bc924644012049afd11ac9226ede0b0: Status 404 returned error can't find the container with id 7b35180a7ed4e9f6a81dbfbd9ea49e8b9bc924644012049afd11ac9226ede0b0 Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.795741 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:33 crc kubenswrapper[4861]: E0129 06:37:33.796026 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:34.296010678 +0000 UTC m=+145.967505235 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.897208 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:33 crc kubenswrapper[4861]: E0129 06:37:33.897549 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:34.397536974 +0000 UTC m=+146.069031531 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:33 crc kubenswrapper[4861]: I0129 06:37:33.999613 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:33.999769 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:34.499748138 +0000 UTC m=+146.171242685 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.000102 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:34.000428 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:34.500418056 +0000 UTC m=+146.171912623 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.018895 4861 csr.go:261] certificate signing request csr-cftql is approved, waiting to be issued Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.025674 4861 csr.go:257] certificate signing request csr-cftql is issued Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.101563 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:34.101869 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:34.601841369 +0000 UTC m=+146.273335916 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.102034 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:34.102354 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:34.602346533 +0000 UTC m=+146.273841090 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.168470 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8lw4d" podStartSLOduration=123.168454281 podStartE2EDuration="2m3.168454281s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.164640139 +0000 UTC m=+145.836134696" watchObservedRunningTime="2026-01-29 06:37:34.168454281 +0000 UTC m=+145.839948838" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.202691 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:34.202972 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:34.702952774 +0000 UTC m=+146.374447331 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.220640 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t7qwz" podStartSLOduration=123.220607576 podStartE2EDuration="2m3.220607576s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.217969986 +0000 UTC m=+145.889464543" watchObservedRunningTime="2026-01-29 06:37:34.220607576 +0000 UTC m=+145.892102133" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.255172 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" podStartSLOduration=123.25515728 podStartE2EDuration="2m3.25515728s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.253190208 +0000 UTC m=+145.924684775" watchObservedRunningTime="2026-01-29 06:37:34.25515728 +0000 UTC m=+145.926651837" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.312843 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-qfjmk" event={"ID":"39b20f61-cd5e-41c6-bce9-35eaa98d85ab","Type":"ContainerStarted","Data":"5a858a4099fcfd09a6d555f24e09d12f25384846dbcde0902da4b112b9514c62"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.313017 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:34.313440 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:34.813429169 +0000 UTC m=+146.484923726 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.313778 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-qfjmk" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.321356 4861 patch_prober.go:28] interesting pod/console-operator-58897d9998-qfjmk container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.321402 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-qfjmk" podUID="39b20f61-cd5e-41c6-bce9-35eaa98d85ab" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.341037 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" event={"ID":"40e2a853-6297-48dc-85dd-7eccd47c2907","Type":"ContainerStarted","Data":"8b31abb5f3d2e0d92b0a81e627b3db977c096a00443777e2dad34fd6ca653d2a"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.341104 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" event={"ID":"40e2a853-6297-48dc-85dd-7eccd47c2907","Type":"ContainerStarted","Data":"93f5eb054f6ea61a5ff25392e7b150d81eaefcb7fd2615c81fb3868cef0a29e6"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.341901 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-r2hxt" podStartSLOduration=123.34188689 podStartE2EDuration="2m3.34188689s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.340696038 +0000 UTC m=+146.012190615" watchObservedRunningTime="2026-01-29 06:37:34.34188689 +0000 UTC m=+146.013381447" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.341994 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4mzns" podStartSLOduration=123.341990453 podStartE2EDuration="2m3.341990453s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.311759224 +0000 UTC m=+145.983253781" watchObservedRunningTime="2026-01-29 06:37:34.341990453 +0000 UTC m=+146.013485010" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.375481 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" 
event={"ID":"2540143e-271c-4f8b-9c7c-1032c7ddb2eb","Type":"ContainerStarted","Data":"cc8cfc95dc48a26384b313495537f073b052a00a4df45f11f8554e794dae85d4"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.379862 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.379890 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" event={"ID":"2540143e-271c-4f8b-9c7c-1032c7ddb2eb","Type":"ContainerStarted","Data":"c6883901e5b3b93d1478362a05fe27f452ded599023227228e3f2ed22ed66628"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.380323 4861 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-v4shf container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.380392 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" podUID="2540143e-271c-4f8b-9c7c-1032c7ddb2eb" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.389410 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" event={"ID":"4f0d3cda-4ef5-47ea-8138-881b738c1088","Type":"ContainerStarted","Data":"163a711c796c45f5b96e77008a422f6834abe6f3333418a22bf95e51521899c4"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.393253 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" event={"ID":"3155916f-0fd6-4e27-9ddc-d0cff45ae575","Type":"ContainerStarted","Data":"ce28af6d53eb8eebcf61dedae00f0e8a1dc9f5ca3493738d2dd7b1bbf4b2286f"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.406882 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" event={"ID":"3774c246-b396-48e5-bfbc-075fe535b932","Type":"ContainerStarted","Data":"7b35180a7ed4e9f6a81dbfbd9ea49e8b9bc924644012049afd11ac9226ede0b0"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.409935 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" event={"ID":"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8","Type":"ContainerStarted","Data":"8446ae1dd5d8ad7b3572929ee066aedd91f0540e8e4a4a3939fcc786760854c1"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.414378 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:34.414981 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-29 06:37:34.914967365 +0000 UTC m=+146.586461922 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.428826 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" event={"ID":"998c663f-cba1-49d6-8685-14f84d3fa118","Type":"ContainerStarted","Data":"65aaa0b1e0a7beabbd86dd79ccbb72661b587862a00356258f8585e58c08a624"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.433140 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-9qj5w" event={"ID":"8f74e6b3-bf32-4188-9246-3e164a0857ca","Type":"ContainerStarted","Data":"800401f623527c10a804e3c7897343527ac4983b64472c1a1ba78bd499eadc3d"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.446677 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq" event={"ID":"666760cf-9fb4-415b-929e-14212d7cf828","Type":"ContainerStarted","Data":"a8892db40697c5dd48fa847a5007841b57cc556cd8559d436cb42eee075ff859"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.447631 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" podStartSLOduration=123.447614278 podStartE2EDuration="2m3.447614278s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.44654024 +0000 UTC m=+146.118034827" watchObservedRunningTime="2026-01-29 06:37:34.447614278 +0000 UTC m=+146.119108835" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.449810 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" podStartSLOduration=123.449803187 podStartE2EDuration="2m3.449803187s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.412561511 +0000 UTC m=+146.084056078" watchObservedRunningTime="2026-01-29 06:37:34.449803187 +0000 UTC m=+146.121297744" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.467954 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-hrbzh" event={"ID":"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed","Type":"ContainerStarted","Data":"d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.490652 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-cwtjn" podStartSLOduration=123.490635579 podStartE2EDuration="2m3.490635579s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 
06:37:34.489747796 +0000 UTC m=+146.161242353" watchObservedRunningTime="2026-01-29 06:37:34.490635579 +0000 UTC m=+146.162130136" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.501700 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" event={"ID":"580b962b-dde1-4614-9f11-e3a6cb0dc860","Type":"ContainerStarted","Data":"0ed17377f07f5e25dd6c932b7dd5bbb3c4f21be01578d5d362b42976a8a0a5bd"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.511540 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" event={"ID":"9c7d54e5-91df-4e16-ae91-a4310316c572","Type":"ContainerStarted","Data":"72be139cdb9d99c019a88837fce94b772cfc6e188f4c3e15d8e12be0d5431b17"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.515889 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:34.518340 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:35.01832737 +0000 UTC m=+146.689821927 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.531162 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" event={"ID":"cead6a1e-5df8-4937-9ee3-71efe555615f","Type":"ContainerStarted","Data":"64848bd9fbdd13a9af22fbdb989eb31339fc8d7ab2aaaad45f0e2978da015e80"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.537605 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-wxx66" event={"ID":"a994828d-b493-4557-b2fc-d6ac66948306","Type":"ContainerStarted","Data":"a96be0565c9b7df0978391a82328201f4f5d9e4fc38cf52dc1fa6889295b577f"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.573608 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-vvhvw" event={"ID":"3ee9ba97-39ba-4d37-a844-16bfa7d16afc","Type":"ContainerStarted","Data":"95a51b0c8aa82786ec6c109ed2d2a7da17174fa6eac1f2e5965af55b8e801565"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.580287 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-5d7b7" podStartSLOduration=123.580268247 podStartE2EDuration="2m3.580268247s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.579513677 +0000 
UTC m=+146.251008234" watchObservedRunningTime="2026-01-29 06:37:34.580268247 +0000 UTC m=+146.251762794" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.581040 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-qfjmk" podStartSLOduration=123.581034007 podStartE2EDuration="2m3.581034007s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.537860813 +0000 UTC m=+146.209355370" watchObservedRunningTime="2026-01-29 06:37:34.581034007 +0000 UTC m=+146.252528564" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.591432 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" event={"ID":"efafc8de-1135-4405-bc1c-11c2efcdb99a","Type":"ContainerStarted","Data":"3798f866d22dd37076ada397e2a6ed50675ee90a5ff7b2d51baef952d145c63c"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.592115 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" event={"ID":"efafc8de-1135-4405-bc1c-11c2efcdb99a","Type":"ContainerStarted","Data":"41e29869b96dc64a11ad0d4e1a96002e64ef2c90972eeeb38ed23bca1e8ed75d"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.615364 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" event={"ID":"407f7505-8386-467a-9b71-e1aea70b9c3d","Type":"ContainerStarted","Data":"440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.616248 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.616583 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-9qj5w" podStartSLOduration=123.616573128 podStartE2EDuration="2m3.616573128s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.615418707 +0000 UTC m=+146.286913274" watchObservedRunningTime="2026-01-29 06:37:34.616573128 +0000 UTC m=+146.288067685" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.616764 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:34.618799 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:35.118781877 +0000 UTC m=+146.790276434 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.619560 4861 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-s46nf container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.24:6443/healthz\": dial tcp 10.217.0.24:6443: connect: connection refused" start-of-body= Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.619601 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" podUID="407f7505-8386-467a-9b71-e1aea70b9c3d" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.24:6443/healthz\": dial tcp 10.217.0.24:6443: connect: connection refused" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.625368 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84" event={"ID":"80eb150e-f53d-41ef-8338-41643b573ad1","Type":"ContainerStarted","Data":"438c95011b6a80efd75b8330be097ff9ff0813b013a661662c66f0a8f0cbb114"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.628348 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr" event={"ID":"f095a543-8e25-4662-af3b-fa3cabdae61a","Type":"ContainerStarted","Data":"0547e7e8700391f70ded08134a6d9629e1b09fb23f9badae5c4a1d2073260685"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.628371 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr" event={"ID":"f095a543-8e25-4662-af3b-fa3cabdae61a","Type":"ContainerStarted","Data":"ad7714968629ec2b297ef7f340d3b0c8a0ffd6b4967508e64121999c1836a384"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.634885 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" event={"ID":"bee90beb-2799-47b4-8a1c-e8185a60e8d7","Type":"ContainerStarted","Data":"3990de01894d8ee0e40d1eb762a9c199f64ad2b0551874ac148cc010e1778774"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.636463 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" event={"ID":"3be4630d-1e62-4c4d-979b-1e8bc839263a","Type":"ContainerStarted","Data":"db05d41f2551da3b32bc0546529b14eea5893327ef24a72463355cbe9957908e"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.642265 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8" event={"ID":"5319e102-d348-45b0-ab8b-1cf7c04a7c54","Type":"ContainerStarted","Data":"8fc111e4336b380a6294e54f121dce0767f9efdfaecf4dee9620224a93e2de73"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.648202 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-x82xx" 
event={"ID":"841455ff-8571-49ca-9aec-fb055f63bbef","Type":"ContainerStarted","Data":"6a6258798958da0770f89a1b5102abe2be2f8401f3b90b3c5d411f34f7e46610"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.650526 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq" event={"ID":"25d0880d-c427-464c-bd37-cda67e6502da","Type":"ContainerStarted","Data":"88fd72cd3f8674fa81639d747b90ed5e75b44bf29ce1bb5d6778e757e0dfb6d9"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.653330 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-2lxpk" event={"ID":"5d58e215-ddae-413b-819a-27740f79d427","Type":"ContainerStarted","Data":"94a7d88bc6c9917fc5b5d7abe34e599a8f626146ac8ec1626ddb39839d95d0cc"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.653358 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-2lxpk" event={"ID":"5d58e215-ddae-413b-819a-27740f79d427","Type":"ContainerStarted","Data":"4f300612555222e76a2bfc2abf8d0d8059a5e6ce77abc40ef6bebf36eb791146"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.665017 4861 generic.go:334] "Generic (PLEG): container finished" podID="3a5a7122-9da5-4b48-9282-c90135f1b339" containerID="2419190a4435d407fe42b8452c6e95a211a738f49a57b474dd541201010ffad7" exitCode=0 Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.665152 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" event={"ID":"3a5a7122-9da5-4b48-9282-c90135f1b339","Type":"ContainerDied","Data":"2419190a4435d407fe42b8452c6e95a211a738f49a57b474dd541201010ffad7"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.675603 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" event={"ID":"f2f0a0e7-e725-462f-afe4-b76db9ba2864","Type":"ContainerStarted","Data":"e5fe11c6f6709e1b99f6474ca8fd7ccffa31acf8f759fe6b881b7166347e4def"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.717645 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-lzhnd" podStartSLOduration=123.717630681 podStartE2EDuration="2m3.717630681s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.679034389 +0000 UTC m=+146.350528946" watchObservedRunningTime="2026-01-29 06:37:34.717630681 +0000 UTC m=+146.389125238" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.718435 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:34.720632 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-29 06:37:35.220620111 +0000 UTC m=+146.892114668 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.737100 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" podStartSLOduration=123.737056911 podStartE2EDuration="2m3.737056911s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.719340247 +0000 UTC m=+146.390834804" watchObservedRunningTime="2026-01-29 06:37:34.737056911 +0000 UTC m=+146.408551468" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.771370 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-bqw59" event={"ID":"0071b780-153c-49f4-89c1-669953dab11b","Type":"ContainerStarted","Data":"0e3370690d5fa4d4f7851f440ca323b4ab834065c01e0dd0cec1f65d90e563eb"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.771589 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.777575 4861 patch_prober.go:28] interesting pod/router-default-5444994796-9qj5w container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.777612 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9qj5w" podUID="8f74e6b3-bf32-4188-9246-3e164a0857ca" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.808206 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-2lxpk" podStartSLOduration=5.808191184 podStartE2EDuration="5.808191184s" podCreationTimestamp="2026-01-29 06:37:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.807196397 +0000 UTC m=+146.478690954" watchObservedRunningTime="2026-01-29 06:37:34.808191184 +0000 UTC m=+146.479685741" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.808916 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-hrbzh" podStartSLOduration=123.808910023 podStartE2EDuration="2m3.808910023s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.739519157 +0000 UTC m=+146.411013724" watchObservedRunningTime="2026-01-29 06:37:34.808910023 +0000 UTC m=+146.480404580" Jan 29 
06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.825377 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" event={"ID":"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3","Type":"ContainerStarted","Data":"724a1208bb7f50764d1a7b57ca02e98f8c7a2e41d5e5e52ff6a5d313c7e6901e"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.825713 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:34.827141 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:35.32712527 +0000 UTC m=+146.998619827 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.860961 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hxdzl" podStartSLOduration=123.860939405 podStartE2EDuration="2m3.860939405s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.859357632 +0000 UTC m=+146.530852199" watchObservedRunningTime="2026-01-29 06:37:34.860939405 +0000 UTC m=+146.532433962" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.910672 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v" event={"ID":"952eba6c-ad61-4a8e-9cae-77e3285fed37","Type":"ContainerStarted","Data":"bedf64d517aebdc7fbb09584a8b22f3e8df02c4a5ce28d3cf9d7536441fd8cc3"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.914656 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-vvhvw" podStartSLOduration=5.914634921 podStartE2EDuration="5.914634921s" podCreationTimestamp="2026-01-29 06:37:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.900354969 +0000 UTC m=+146.571849536" watchObservedRunningTime="2026-01-29 06:37:34.914634921 +0000 UTC m=+146.586129478" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.931530 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" 
Jan 29 06:37:34 crc kubenswrapper[4861]: E0129 06:37:34.931929 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:35.431916623 +0000 UTC m=+147.103411180 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.941006 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-jng8m" event={"ID":"f7f7f0b2-fcab-4777-bd72-60bf1b3fede4","Type":"ContainerStarted","Data":"158fa02838fe5041147c367ab1fa8024797c426a1e28a3af58094003dede5457"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.941988 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-jng8m" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.949768 4861 patch_prober.go:28] interesting pod/downloads-7954f5f757-jng8m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.949817 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jng8m" podUID="f7f7f0b2-fcab-4777-bd72-60bf1b3fede4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.954618 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz" event={"ID":"6a627dba-db5e-46f8-b214-eb8a9d3e44ed","Type":"ContainerStarted","Data":"966a06fb5f0f19e8b4ca5fde0f0e616f557068c517a7bdddc265cff2cadbeac6"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.954651 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz" event={"ID":"6a627dba-db5e-46f8-b214-eb8a9d3e44ed","Type":"ContainerStarted","Data":"cf79cc01290637fd6d7667d06c45a72d12c84f34c9199d5c94037a609b8d3b1a"} Jan 29 06:37:34 crc kubenswrapper[4861]: I0129 06:37:34.983501 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" podStartSLOduration=123.976913757 podStartE2EDuration="2m3.976913757s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.976221519 +0000 UTC m=+146.647716086" watchObservedRunningTime="2026-01-29 06:37:34.976913757 +0000 UTC m=+146.648408314" Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.012448 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" 
event={"ID":"7edaa7aa-38fe-494a-84a9-f533d9710d1b","Type":"ContainerStarted","Data":"8d55ce5603329054df945d2b75e1ac7ba5b702ed31b6b00be31aa9eb76588cee"} Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.013110 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-k9th8" podStartSLOduration=124.013088695 podStartE2EDuration="2m4.013088695s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:34.997495288 +0000 UTC m=+146.668989845" watchObservedRunningTime="2026-01-29 06:37:35.013088695 +0000 UTC m=+146.684583252" Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.026046 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" event={"ID":"913a3527-6d79-4441-8895-a9212004f20b","Type":"ContainerStarted","Data":"ad61168ed192ef38db1c33ac67fa42834e89c8ca9de4109cf453ce5c938ff079"} Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.027316 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-29 06:32:34 +0000 UTC, rotation deadline is 2026-11-02 22:09:42.441190376 +0000 UTC Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.027359 4861 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6663h32m7.4138347s for next certificate rotation Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.032615 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.033699 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:35.533674215 +0000 UTC m=+147.205168772 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.067718 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.075105 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-vjvgq" podStartSLOduration=124.075089893 podStartE2EDuration="2m4.075089893s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:35.036671766 +0000 UTC m=+146.708166343" watchObservedRunningTime="2026-01-29 06:37:35.075089893 +0000 UTC m=+146.746584450" Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.075559 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-jng8m" podStartSLOduration=124.075555136 podStartE2EDuration="2m4.075555136s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:35.074273251 +0000 UTC m=+146.745767808" watchObservedRunningTime="2026-01-29 06:37:35.075555136 +0000 UTC m=+146.747049693" Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.120247 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gqnvz" podStartSLOduration=124.120228571 podStartE2EDuration="2m4.120228571s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:35.102777304 +0000 UTC m=+146.774271851" watchObservedRunningTime="2026-01-29 06:37:35.120228571 +0000 UTC m=+146.791723128" Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.136108 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.136592 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" podStartSLOduration=124.136578258 podStartE2EDuration="2m4.136578258s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:35.134182444 +0000 UTC m=+146.805677001" watchObservedRunningTime="2026-01-29 06:37:35.136578258 +0000 UTC m=+146.808072815" Jan 29 06:37:35 
crc kubenswrapper[4861]: E0129 06:37:35.138784 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:35.638772867 +0000 UTC m=+147.310267424 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.216234 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-rxbpq" podStartSLOduration=124.216217988 podStartE2EDuration="2m4.216217988s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:35.180829112 +0000 UTC m=+146.852323669" watchObservedRunningTime="2026-01-29 06:37:35.216217988 +0000 UTC m=+146.887712545" Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.238006 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.238293 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:35.738277898 +0000 UTC m=+147.409772445 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.339961 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.340369 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:35.840351019 +0000 UTC m=+147.511845576 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.441824 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.442008 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:35.941984877 +0000 UTC m=+147.613479434 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.442088 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.442427 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:35.942415639 +0000 UTC m=+147.613910186 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.543626 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.543793 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.04376811 +0000 UTC m=+147.715262667 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.543940 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.544214 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.044202472 +0000 UTC m=+147.715697029 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.646309 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.646476 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.146450127 +0000 UTC m=+147.817944694 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.646929 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.647264 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.147255688 +0000 UTC m=+147.818750245 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.747840 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.748029 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.248004673 +0000 UTC m=+147.919499230 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.748238 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.748557 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.248543608 +0000 UTC m=+147.920038165 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.772123 4861 patch_prober.go:28] interesting pod/router-default-5444994796-9qj5w container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 06:37:35 crc kubenswrapper[4861]: [-]has-synced failed: reason withheld Jan 29 06:37:35 crc kubenswrapper[4861]: [+]process-running ok Jan 29 06:37:35 crc kubenswrapper[4861]: healthz check failed Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.772177 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9qj5w" podUID="8f74e6b3-bf32-4188-9246-3e164a0857ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.849175 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.849529 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.349513359 +0000 UTC m=+148.021007916 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:35 crc kubenswrapper[4861]: I0129 06:37:35.950892 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:35 crc kubenswrapper[4861]: E0129 06:37:35.951213 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.451202379 +0000 UTC m=+148.122696926 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.031258 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" event={"ID":"3a5a7122-9da5-4b48-9282-c90135f1b339","Type":"ContainerStarted","Data":"2dc50e534b574872d93812cafb42425f7d06d1b47ac0dd47c2675cdde89bf656"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.031936 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.033686 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-x82xx" event={"ID":"841455ff-8571-49ca-9aec-fb055f63bbef","Type":"ContainerStarted","Data":"6aa606fa12f2e346b8d2de4dc073f4167b20bb6d25c9067b67a72d040e74f795"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.044483 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-bqw59" event={"ID":"0071b780-153c-49f4-89c1-669953dab11b","Type":"ContainerStarted","Data":"8d63ff84b987da84dd7cca13bdb5f38cb89e2127f7992b98e6cc99027984e7e9"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.051991 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" event={"ID":"3774c246-b396-48e5-bfbc-075fe535b932","Type":"ContainerStarted","Data":"492cd78e02be02c778de41eaa9ff8994dae35bcc8964e1f73041044f319096b6"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.052055 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" event={"ID":"3774c246-b396-48e5-bfbc-075fe535b932","Type":"ContainerStarted","Data":"587c8db295a156ef6f8530a7b7290c2f62d4def1db7f61344a5b1fa19f51a1f3"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.052430 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.052718 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" Jan 29 06:37:36 crc kubenswrapper[4861]: E0129 06:37:36.052845 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.552830517 +0000 UTC m=+148.224325074 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.060706 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" event={"ID":"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3","Type":"ContainerStarted","Data":"a2c61a1de90ac25dc67452730f0600283a9513bff2fe9648523a12ad0a19ec8d"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.061151 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.062411 4861 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-7tpcj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/healthz\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.062460 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" podUID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.26:8080/healthz\": dial tcp 10.217.0.26:8080: connect: connection refused" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.063778 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r" podStartSLOduration=125.06376513 podStartE2EDuration="2m5.06376513s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.053022612 +0000 UTC m=+147.724517179" watchObservedRunningTime="2026-01-29 06:37:36.06376513 +0000 UTC m=+147.735259687" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.072825 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq" event={"ID":"666760cf-9fb4-415b-929e-14212d7cf828","Type":"ContainerStarted","Data":"af905d510e24888b9f01cd1c2012d5597eb6f2bfd1c70a87633b2008dfe1c11c"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.075093 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84" event={"ID":"80eb150e-f53d-41ef-8338-41643b573ad1","Type":"ContainerStarted","Data":"fd956769e29027b5286546aae23293511294c0298edf1cb7c3ed52231c26ea21"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.075129 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84" event={"ID":"80eb150e-f53d-41ef-8338-41643b573ad1","Type":"ContainerStarted","Data":"c34bd10372ed6149079c798dcb7d10f6edbb408b3c3ea51c5926674037c1f7d6"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.086954 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" event={"ID":"2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8","Type":"ContainerStarted","Data":"7c563fd4730e20e90283f2dc46c8d0750ced204165f4f141aa5de873a755564b"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.087175 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.088833 4861 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-vvwrw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body= Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.088879 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" podUID="2ee1ee73-ccc9-456e-ac62-b37ea3f0abf8" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.098761 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-wxx66" event={"ID":"a994828d-b493-4557-b2fc-d6ac66948306","Type":"ContainerStarted","Data":"d7274b6dcc61baf9c9ac47caeb9380cfbcc4555dd9bc68a1d02375c4b9ea53e7"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.098801 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-wxx66" event={"ID":"a994828d-b493-4557-b2fc-d6ac66948306","Type":"ContainerStarted","Data":"1ea31ebfbe3c49a7d15dfbffe2e98c4cc92fcfac1448cea2196750ba1acb94a0"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.099384 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.105793 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" event={"ID":"4f0d3cda-4ef5-47ea-8138-881b738c1088","Type":"ContainerStarted","Data":"56fa168d72da0cda85523ea81ebb9b6e575858debf3ea3f6ad6908a0a0b129b6"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.107621 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v" event={"ID":"952eba6c-ad61-4a8e-9cae-77e3285fed37","Type":"ContainerStarted","Data":"ce3cec357a7eaf7f67eaf0fe6f728d86cc071da89f90abd4beaf999c4b0853b4"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.115943 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" event={"ID":"3155916f-0fd6-4e27-9ddc-d0cff45ae575","Type":"ContainerStarted","Data":"9eb9d1675498a8b506eb4a3bf9186d8bb9ee77f270b6b498738ceb5d27f1d7b4"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.122287 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lbl9s" event={"ID":"7edaa7aa-38fe-494a-84a9-f533d9710d1b","Type":"ContainerStarted","Data":"bfb8f3c85df5eff10013a8c51677dcababd0d441671b4e106475fdd1bfdba503"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.123979 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr" 
event={"ID":"f095a543-8e25-4662-af3b-fa3cabdae61a","Type":"ContainerStarted","Data":"2531342d71e10a6c366e63ce162f46c1cf2c0d6930e8331dfea3b05794ae9585"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.136703 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" event={"ID":"bee90beb-2799-47b4-8a1c-e8185a60e8d7","Type":"ContainerStarted","Data":"fdc2a6934b226f71944e0f2e1b55f04ad3b694676234d6ec5e1c9a0168bd3c05"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.143137 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" event={"ID":"bee90beb-2799-47b4-8a1c-e8185a60e8d7","Type":"ContainerStarted","Data":"b64effc6c21e09138c5843ecc25472670c90e370e0171d1c5d5291efca3e6807"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.158313 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:36 crc kubenswrapper[4861]: E0129 06:37:36.159366 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.659353107 +0000 UTC m=+148.330847664 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.187096 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" event={"ID":"cead6a1e-5df8-4937-9ee3-71efe555615f","Type":"ContainerStarted","Data":"e4b916c599c0cc7e6a0d63d8fb6a748a231e2a87702527f7c74b582ca5fb4435"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.218288 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-vvhvw" event={"ID":"3ee9ba97-39ba-4d37-a844-16bfa7d16afc","Type":"ContainerStarted","Data":"e387e048f6013a2b4d3b90d71e08e43fa169c4bcf868ff96a43750afe1a724d3"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.226302 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.226367 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.234033 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-bqw59" podStartSLOduration=125.234015064 podStartE2EDuration="2m5.234015064s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.136476205 +0000 UTC m=+147.807970772" watchObservedRunningTime="2026-01-29 06:37:36.234015064 +0000 UTC m=+147.905509621" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.234136 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" podStartSLOduration=125.234133147 podStartE2EDuration="2m5.234133147s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.233975903 +0000 UTC m=+147.905470470" watchObservedRunningTime="2026-01-29 06:37:36.234133147 +0000 UTC m=+147.905627704" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.259548 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:36 crc kubenswrapper[4861]: E0129 06:37:36.260370 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.760356169 +0000 UTC m=+148.431850726 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.264772 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" event={"ID":"efafc8de-1135-4405-bc1c-11c2efcdb99a","Type":"ContainerStarted","Data":"6e7fcd204975f613657cc89274161c1a2332cb68ec064bc1ef7aaad81499217a"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.282795 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" event={"ID":"3be4630d-1e62-4c4d-979b-1e8bc839263a","Type":"ContainerStarted","Data":"c60233e3d05d248a9b4a59baa31749786bc297bcc8bff8c944da76970b5fe6c2"} Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.283105 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.284704 4861 patch_prober.go:28] interesting pod/downloads-7954f5f757-jng8m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.284751 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jng8m" podUID="f7f7f0b2-fcab-4777-bd72-60bf1b3fede4" containerName="download-server" 
probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.290684 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v4shf" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.301051 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.323356 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-qsbrr" podStartSLOduration=125.323336693 podStartE2EDuration="2m5.323336693s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.321477644 +0000 UTC m=+147.992972211" watchObservedRunningTime="2026-01-29 06:37:36.323336693 +0000 UTC m=+147.994831250" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.361772 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:36 crc kubenswrapper[4861]: E0129 06:37:36.363543 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.863531698 +0000 UTC m=+148.535026255 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.427259 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" podStartSLOduration=125.427241113 podStartE2EDuration="2m5.427241113s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.420848632 +0000 UTC m=+148.092343209" watchObservedRunningTime="2026-01-29 06:37:36.427241113 +0000 UTC m=+148.098735670" Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.463140 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:36 crc kubenswrapper[4861]: E0129 06:37:36.464342 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:36.964322345 +0000 UTC m=+148.635816892 (durationBeforeRetry 500ms). 
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.510464 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.510543 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.520015 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" podStartSLOduration=125.519994874 podStartE2EDuration="2m5.519994874s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.488980214 +0000 UTC m=+148.160474771" watchObservedRunningTime="2026-01-29 06:37:36.519994874 +0000 UTC m=+148.191489421"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.521112 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" podStartSLOduration=125.521106454 podStartE2EDuration="2m5.521106454s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.520080546 +0000 UTC m=+148.191575093" watchObservedRunningTime="2026-01-29 06:37:36.521106454 +0000 UTC m=+148.192601011"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.539288 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-qfjmk"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.565569 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn"
Jan 29 06:37:36 crc kubenswrapper[4861]: E0129 06:37:36.565924 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.065908702 +0000 UTC m=+148.737403259 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.575583 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hvv84" podStartSLOduration=125.57556614 podStartE2EDuration="2m5.57556614s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.548493266 +0000 UTC m=+148.219987823" watchObservedRunningTime="2026-01-29 06:37:36.57556614 +0000 UTC m=+148.247060697"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.576776 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vhjkq" podStartSLOduration=125.576772533 podStartE2EDuration="2m5.576772533s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.575036506 +0000 UTC m=+148.246531073" watchObservedRunningTime="2026-01-29 06:37:36.576772533 +0000 UTC m=+148.248267090"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.604324 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.620307 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" podStartSLOduration=125.620290877 podStartE2EDuration="2m5.620290877s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.619273549 +0000 UTC m=+148.290768106" watchObservedRunningTime="2026-01-29 06:37:36.620290877 +0000 UTC m=+148.291785434"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.658015 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fnrr2" podStartSLOduration=125.658001245 podStartE2EDuration="2m5.658001245s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.656477325 +0000 UTC m=+148.327971882" watchObservedRunningTime="2026-01-29 06:37:36.658001245 +0000 UTC m=+148.329495802"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.666654 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 06:37:36 crc kubenswrapper[4861]: E0129 06:37:36.666994 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.166976666 +0000 UTC m=+148.838471223 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
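
For the "Observed pod startup duration" bookkeeping above: podStartSLOduration is the wall-clock gap from podCreationTimestamp to observedRunningTime, minus any image-pull window (here firstStartedPulling/lastFinishedPulling are the zero time, so nothing is subtracted), and podStartE2EDuration is the same figure rendered as a duration string. Checking one line: 06:35:31 to 06:37:36.52 is 125.52s, i.e. "2m5.52s". The same arithmetic in Go, with field meanings as read off the log rather than from the tracker's source:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Values copied from the marketplace-operator line above.
    	created, _ := time.Parse(time.DateTime, "2026-01-29 06:35:31")
    	running, _ := time.Parse(time.DateTime, "2026-01-29 06:37:36.519994874")
    	slo := running.Sub(created) // an image-pull window would be subtracted here if non-zero
    	fmt.Println(slo.Seconds())  // 125.519994874, matching podStartSLOduration
    	fmt.Println(slo)            // 2m5.519994874s, matching podStartE2EDuration
    }
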
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.682097 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.730651 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-wxx66" podStartSLOduration=7.730632318 podStartE2EDuration="7.730632318s" podCreationTimestamp="2026-01-29 06:37:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.72882451 +0000 UTC m=+148.400319067" watchObservedRunningTime="2026-01-29 06:37:36.730632318 +0000 UTC m=+148.402126875"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.767634 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn"
Jan 29 06:37:36 crc kubenswrapper[4861]: E0129 06:37:36.767993 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.267977687 +0000 UTC m=+148.939472244 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.768654 4861 patch_prober.go:28] interesting pod/router-default-5444994796-9qj5w container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 06:37:36 crc kubenswrapper[4861]: [-]has-synced failed: reason withheld
Jan 29 06:37:36 crc kubenswrapper[4861]: [+]process-running ok
Jan 29 06:37:36 crc kubenswrapper[4861]: healthz check failed
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.768717 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9qj5w" podUID="8f74e6b3-bf32-4188-9246-3e164a0857ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.795335 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-f9vlr" podStartSLOduration=125.795312668 podStartE2EDuration="2m5.795312668s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.761687319 +0000 UTC m=+148.433181876" watchObservedRunningTime="2026-01-29 06:37:36.795312668 +0000 UTC m=+148.466807225"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.857893 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fmh7v" podStartSLOduration=125.857877661 podStartE2EDuration="2m5.857877661s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.803150558 +0000 UTC m=+148.474645115" watchObservedRunningTime="2026-01-29 06:37:36.857877661 +0000 UTC m=+148.529372218"
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.869537 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 06:37:36 crc kubenswrapper[4861]: E0129 06:37:36.869928 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.369912813 +0000 UTC m=+149.041407360 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
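
The router's startup-probe output above follows the k8s.io healthz convention: each named sub-check prints as [+]name ok or [-]name failed, any failure flips the endpoint to HTTP 500, and the body ends with "healthz check failed"; here backend-http and has-synced are still failing while process-running passes. A toy handler in the same style, an illustrative sketch rather than the router's actual healthz wiring:

    package main

    import (
    	"fmt"
    	"net/http"
    )

    type check struct {
    	name string
    	fn   func() error
    }

    // healthz aggregates named checks and renders them in the [+]/[-] style
    // seen in the probe output above.
    func healthz(checks []check) http.HandlerFunc {
    	return func(w http.ResponseWriter, r *http.Request) {
    		body, failed := "", false
    		for _, c := range checks {
    			if err := c.fn(); err != nil {
    				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
    				failed = true
    			} else {
    				body += fmt.Sprintf("[+]%s ok\n", c.name)
    			}
    		}
    		if failed {
    			w.WriteHeader(http.StatusInternalServerError) // kubelet logs "statuscode: 500"
    			body += "healthz check failed\n"
    		}
    		fmt.Fprint(w, body)
    	}
    }

    func main() {
    	http.Handle("/healthz", healthz([]check{
    		{"backend-http", func() error { return fmt.Errorf("no backends yet") }},
    		{"process-running", func() error { return nil }},
    	}))
    	http.ListenAndServe(":8080", nil)
    }
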
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.975762 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn"
Jan 29 06:37:36 crc kubenswrapper[4861]: E0129 06:37:36.976083 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.476056992 +0000 UTC m=+149.147551549 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 06:37:36 crc kubenswrapper[4861]: I0129 06:37:36.976937 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xqwts" podStartSLOduration=125.976918145 podStartE2EDuration="2m5.976918145s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:36.916259033 +0000 UTC m=+148.587753590" watchObservedRunningTime="2026-01-29 06:37:36.976918145 +0000 UTC m=+148.648412702"
Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.023435 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fsvsh" podStartSLOduration=126.023421649 podStartE2EDuration="2m6.023421649s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:37.021362654 +0000 UTC m=+148.692857221" watchObservedRunningTime="2026-01-29 06:37:37.023421649 +0000 UTC m=+148.694916206"
Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.078567 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.078952 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName:
\"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.079002 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.079045 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.079096 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:37 crc kubenswrapper[4861]: E0129 06:37:37.079923 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.57990251 +0000 UTC m=+149.251397057 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.086183 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.103308 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.105417 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.112686 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.180744 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:37 crc kubenswrapper[4861]: E0129 06:37:37.181006 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.680996004 +0000 UTC m=+149.352490561 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.281559 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:37 crc kubenswrapper[4861]: E0129 06:37:37.281819 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.781805051 +0000 UTC m=+149.453299608 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.316878 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-x82xx" event={"ID":"841455ff-8571-49ca-9aec-fb055f63bbef","Type":"ContainerStarted","Data":"f94cf5c98aac3caa8978554c6ac194b27f6e4d28c020443a630046af76e8dd3b"} Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.318420 4861 patch_prober.go:28] interesting pod/downloads-7954f5f757-jng8m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.318474 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jng8m" podUID="f7f7f0b2-fcab-4777-bd72-60bf1b3fede4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.319040 4861 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-7tpcj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/healthz\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.319084 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" podUID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.26:8080/healthz\": dial tcp 10.217.0.26:8080: connect: connection refused" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.338388 4861 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-ffbbx" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.343281 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.373035 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.387376 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.388130 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:37 crc kubenswrapper[4861]: E0129 06:37:37.388403 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.888392282 +0000 UTC m=+149.559886839 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.488791 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:37 crc kubenswrapper[4861]: E0129 06:37:37.491924 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:37.991905461 +0000 UTC m=+149.663400018 (durationBeforeRetry 500ms). 
Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.591717 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn"
Jan 29 06:37:37 crc kubenswrapper[4861]: E0129 06:37:37.592012 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:38.091999829 +0000 UTC m=+149.763494376 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.695615 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 06:37:37 crc kubenswrapper[4861]: E0129 06:37:37.696121 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:38.196105863 +0000 UTC m=+149.867600420 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.775969 4861 patch_prober.go:28] interesting pod/router-default-5444994796-9qj5w container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 06:37:37 crc kubenswrapper[4861]: [-]has-synced failed: reason withheld Jan 29 06:37:37 crc kubenswrapper[4861]: [+]process-running ok Jan 29 06:37:37 crc kubenswrapper[4861]: healthz check failed Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.776021 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9qj5w" podUID="8f74e6b3-bf32-4188-9246-3e164a0857ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.797361 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:37 crc kubenswrapper[4861]: E0129 06:37:37.797596 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:38.297586008 +0000 UTC m=+149.969080565 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.898804 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:37 crc kubenswrapper[4861]: E0129 06:37:37.899084 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:38.399055082 +0000 UTC m=+150.070549639 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:37 crc kubenswrapper[4861]: I0129 06:37:37.971417 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-vvwrw" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.004757 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.005024 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:38.505013276 +0000 UTC m=+150.176507833 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.108222 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.108559 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:38.608544496 +0000 UTC m=+150.280039053 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.218548 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.219066 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:38.719056042 +0000 UTC m=+150.390550599 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.321557 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.321886 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:38.821872642 +0000 UTC m=+150.493367199 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.340645 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-79pqx"] Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.341568 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.343160 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-79pqx"] Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.357837 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.360393 4861 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.360910 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"11c25f3986564b251fba96581160c802156291a268aa85bd68cad46abdb9c2a4"} Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.360941 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"c55f8e34e902871fa6719b4ea8cc4ac68d5fbdfba1f631d4544e64cc5701cad1"} Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.363350 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"e0d677f04e4db3f4240cdaab24a1bf808adc863f48a9ad5f5992fd1db55f6b56"} Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.367876 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-x82xx" event={"ID":"841455ff-8571-49ca-9aec-fb055f63bbef","Type":"ContainerStarted","Data":"c087777452e8aeae1913676a0532e76ccba43a92293feb5c923265ea97f4bee3"} Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.424103 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.424463 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:38.924452276 +0000 UTC m=+150.595946833 (durationBeforeRetry 500ms). 
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.426656 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-24c9r"
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.497116 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-b5hfw"]
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.497949 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b5hfw"
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.505497 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.515181 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b5hfw"]
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.525220 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.525482 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-catalog-content\") pod \"community-operators-79pqx\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") " pod="openshift-marketplace/community-operators-79pqx"
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.525692 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnktp\" (UniqueName: \"kubernetes.io/projected/7d4408c0-2666-4112-ba0d-c9427a29fc66-kube-api-access-rnktp\") pod \"community-operators-79pqx\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") " pod="openshift-marketplace/community-operators-79pqx"
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.525722 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-utilities\") pod \"community-operators-79pqx\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") " pod="openshift-marketplace/community-operators-79pqx"
Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.526736 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.026722752 +0000 UTC m=+150.698217309 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.541435 4861 patch_prober.go:28] interesting pod/apiserver-76f77b778f-q6lpc container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 29 06:37:38 crc kubenswrapper[4861]: [+]log ok Jan 29 06:37:38 crc kubenswrapper[4861]: [+]etcd ok Jan 29 06:37:38 crc kubenswrapper[4861]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 29 06:37:38 crc kubenswrapper[4861]: [+]poststarthook/generic-apiserver-start-informers ok Jan 29 06:37:38 crc kubenswrapper[4861]: [+]poststarthook/max-in-flight-filter ok Jan 29 06:37:38 crc kubenswrapper[4861]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 29 06:37:38 crc kubenswrapper[4861]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 29 06:37:38 crc kubenswrapper[4861]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 29 06:37:38 crc kubenswrapper[4861]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Jan 29 06:37:38 crc kubenswrapper[4861]: [+]poststarthook/project.openshift.io-projectcache ok Jan 29 06:37:38 crc kubenswrapper[4861]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 29 06:37:38 crc kubenswrapper[4861]: [-]poststarthook/openshift.io-startinformers failed: reason withheld Jan 29 06:37:38 crc kubenswrapper[4861]: [+]poststarthook/openshift.io-restmapperupdater ok Jan 29 06:37:38 crc kubenswrapper[4861]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 29 06:37:38 crc kubenswrapper[4861]: livez check failed Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.541486 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" podUID="3155916f-0fd6-4e27-9ddc-d0cff45ae575" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.626938 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.627234 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-utilities\") pod \"certified-operators-b5hfw\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") " pod="openshift-marketplace/certified-operators-b5hfw" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.627264 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-catalog-content\") pod \"community-operators-79pqx\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") " pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.627296 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-catalog-content\") pod \"certified-operators-b5hfw\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") " pod="openshift-marketplace/certified-operators-b5hfw" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.627332 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnktp\" (UniqueName: \"kubernetes.io/projected/7d4408c0-2666-4112-ba0d-c9427a29fc66-kube-api-access-rnktp\") pod \"community-operators-79pqx\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") " pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.627348 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c8m7\" (UniqueName: \"kubernetes.io/projected/840bff63-3c46-4e8d-baa2-32315812df45-kube-api-access-9c8m7\") pod \"certified-operators-b5hfw\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") " pod="openshift-marketplace/certified-operators-b5hfw" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.627364 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-utilities\") pod \"community-operators-79pqx\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") " pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.627736 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-utilities\") pod \"community-operators-79pqx\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") " pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.627892 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.127877948 +0000 UTC m=+150.799372505 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.627948 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-catalog-content\") pod \"community-operators-79pqx\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") " pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.651367 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnktp\" (UniqueName: \"kubernetes.io/projected/7d4408c0-2666-4112-ba0d-c9427a29fc66-kube-api-access-rnktp\") pod \"community-operators-79pqx\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") " pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.682738 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.690097 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cj66x"] Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.691041 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.708773 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cj66x"] Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.728416 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.728608 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.228565951 +0000 UTC m=+150.900060508 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.728663 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.728707 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-utilities\") pod \"certified-operators-b5hfw\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") " pod="openshift-marketplace/certified-operators-b5hfw" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.728770 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-catalog-content\") pod \"certified-operators-b5hfw\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") " pod="openshift-marketplace/certified-operators-b5hfw" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.728808 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c8m7\" (UniqueName: \"kubernetes.io/projected/840bff63-3c46-4e8d-baa2-32315812df45-kube-api-access-9c8m7\") pod \"certified-operators-b5hfw\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") " pod="openshift-marketplace/certified-operators-b5hfw" Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.728951 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.228943851 +0000 UTC m=+150.900438408 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.729325 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-utilities\") pod \"certified-operators-b5hfw\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") " pod="openshift-marketplace/certified-operators-b5hfw"
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.729564 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-catalog-content\") pod \"certified-operators-b5hfw\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") " pod="openshift-marketplace/certified-operators-b5hfw"
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.742641 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c8m7\" (UniqueName: \"kubernetes.io/projected/840bff63-3c46-4e8d-baa2-32315812df45-kube-api-access-9c8m7\") pod \"certified-operators-b5hfw\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") " pod="openshift-marketplace/certified-operators-b5hfw"
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.770698 4861 patch_prober.go:28] interesting pod/router-default-5444994796-9qj5w container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 06:37:38 crc kubenswrapper[4861]: [-]has-synced failed: reason withheld
Jan 29 06:37:38 crc kubenswrapper[4861]: [+]process-running ok
Jan 29 06:37:38 crc kubenswrapper[4861]: healthz check failed
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.770774 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9qj5w" podUID="8f74e6b3-bf32-4188-9246-3e164a0857ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.824615 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b5hfw"
Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.830890 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.831009 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.330988731 +0000 UTC m=+151.002483288 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
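
Note the contrast in volume types here: the marketplace catalog pods (community-operators-*, certified-operators-*) use two emptyDir volumes, utilities and catalog-content, plus a projected service-account token (kube-api-access-*), all of which are node-local and SetUp immediately, while the CSI-backed PVC keeps failing until its driver registers. The emptyDir shape, expressed with client-go's API types; a sketch of the pattern, not the actual marketplace manifest:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    )

    func main() {
    	// The two emptyDir volumes seen in the SetUp lines above.
    	vols := []corev1.Volume{
    		{Name: "utilities", VolumeSource: corev1.VolumeSource{
    			EmptyDir: &corev1.EmptyDirVolumeSource{}}},
    		{Name: "catalog-content", VolumeSource: corev1.VolumeSource{
    			EmptyDir: &corev1.EmptyDirVolumeSource{}}},
    	}
    	for _, v := range vols {
    		fmt.Printf("volume %q: emptyDir, node-local, no CSI attach required\n", v.Name)
    	}
    }
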
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.831109 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-catalog-content\") pod \"community-operators-cj66x\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.831202 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.831252 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28sxd\" (UniqueName: \"kubernetes.io/projected/df326277-c54e-44f7-87df-260579302ade-kube-api-access-28sxd\") pod \"community-operators-cj66x\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.831281 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-utilities\") pod \"community-operators-cj66x\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.831469 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.331461924 +0000 UTC m=+151.002956471 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.887206 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wvlgr"] Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.888182 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.896110 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wvlgr"] Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.931221 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.933996 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.936257 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.936794 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.436771911 +0000 UTC m=+151.108266468 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.937632 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-catalog-content\") pod \"community-operators-cj66x\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.937754 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.937814 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-utilities\") pod \"community-operators-cj66x\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.938357 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-catalog-content\") pod \"community-operators-cj66x\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 
06:37:38.938927 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28sxd\" (UniqueName: \"kubernetes.io/projected/df326277-c54e-44f7-87df-260579302ade-kube-api-access-28sxd\") pod \"community-operators-cj66x\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.938989 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-utilities\") pod \"community-operators-cj66x\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:37:38 crc kubenswrapper[4861]: E0129 06:37:38.939481 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.439464873 +0000 UTC m=+151.110959430 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.942721 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.942793 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.950339 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-79pqx"] Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.960925 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 06:37:38 crc kubenswrapper[4861]: I0129 06:37:38.970564 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28sxd\" (UniqueName: \"kubernetes.io/projected/df326277-c54e-44f7-87df-260579302ade-kube-api-access-28sxd\") pod \"community-operators-cj66x\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.009615 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.057826 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:39 crc kubenswrapper[4861]: E0129 06:37:39.058766 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.558745763 +0000 UTC m=+151.230240330 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.058870 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ffff18c1-7a03-4b5d-bb83-402d2f9725f8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.058915 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ffff18c1-7a03-4b5d-bb83-402d2f9725f8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.058945 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-utilities\") pod \"certified-operators-wvlgr\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.058969 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-catalog-content\") pod \"certified-operators-wvlgr\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.059002 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.059026 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wlgs\" (UniqueName: \"kubernetes.io/projected/363fbc7b-db52-48a7-8789-c46f1304adfe-kube-api-access-2wlgs\") pod \"certified-operators-wvlgr\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:37:39 crc kubenswrapper[4861]: E0129 06:37:39.059433 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.559423711 +0000 UTC m=+151.230918278 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.165673 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.165893 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ffff18c1-7a03-4b5d-bb83-402d2f9725f8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.165931 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ffff18c1-7a03-4b5d-bb83-402d2f9725f8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.165972 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-utilities\") pod \"certified-operators-wvlgr\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.165991 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-catalog-content\") pod \"certified-operators-wvlgr\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.166016 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wlgs\" (UniqueName: \"kubernetes.io/projected/363fbc7b-db52-48a7-8789-c46f1304adfe-kube-api-access-2wlgs\") pod \"certified-operators-wvlgr\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:37:39 crc 
kubenswrapper[4861]: E0129 06:37:39.166663 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.66664865 +0000 UTC m=+151.338143207 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.166820 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ffff18c1-7a03-4b5d-bb83-402d2f9725f8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.167176 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-utilities\") pod \"certified-operators-wvlgr\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.167376 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-catalog-content\") pod \"certified-operators-wvlgr\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.184131 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ffff18c1-7a03-4b5d-bb83-402d2f9725f8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.186145 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wlgs\" (UniqueName: \"kubernetes.io/projected/363fbc7b-db52-48a7-8789-c46f1304adfe-kube-api-access-2wlgs\") pod \"certified-operators-wvlgr\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.206691 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.235148 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cj66x"] Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.270395 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:39 crc kubenswrapper[4861]: E0129 06:37:39.270666 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 06:37:39.770654972 +0000 UTC m=+151.442149529 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-v4kqn" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.275630 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b5hfw"] Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.290438 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.290718 4861 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-29T06:37:38.360407333Z","Handler":null,"Name":""} Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.293626 4861 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.293680 4861 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 29 06:37:39 crc kubenswrapper[4861]: W0129 06:37:39.307321 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod840bff63_3c46_4e8d_baa2_32315812df45.slice/crio-4099aef213edc2a757958af7ee5d9e7e9486646ec155b4dc13fecb7d9a48bf58 WatchSource:0}: Error finding container 4099aef213edc2a757958af7ee5d9e7e9486646ec155b4dc13fecb7d9a48bf58: Status 404 returned error can't find the container with id 4099aef213edc2a757958af7ee5d9e7e9486646ec155b4dc13fecb7d9a48bf58 Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.371475 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.379987 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerID="197c321c79ef1fbf0df338b6861071eec4006ec285d775b09dec1181b80d7db7" exitCode=0 Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.380050 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79pqx" event={"ID":"7d4408c0-2666-4112-ba0d-c9427a29fc66","Type":"ContainerDied","Data":"197c321c79ef1fbf0df338b6861071eec4006ec285d775b09dec1181b80d7db7"} Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.380088 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79pqx" event={"ID":"7d4408c0-2666-4112-ba0d-c9427a29fc66","Type":"ContainerStarted","Data":"02d66eb19cdd90c1dafb5582c1484fe7a3f025c121652a052d3a9f6e13ceec85"} Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.381331 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.385019 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"95700e3d1f7e0e58754fb449156b7b4603d938463543e165aa9aa89ecf5c8cce"} Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.387550 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cj66x" event={"ID":"df326277-c54e-44f7-87df-260579302ade","Type":"ContainerStarted","Data":"96bbca63e8f71dc06c9bb61ee46c7e161fb53b97be104c7a39b7845cd7673005"} Jan 29 
06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.388322 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b5hfw" event={"ID":"840bff63-3c46-4e8d-baa2-32315812df45","Type":"ContainerStarted","Data":"4099aef213edc2a757958af7ee5d9e7e9486646ec155b4dc13fecb7d9a48bf58"} Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.399459 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-x82xx" event={"ID":"841455ff-8571-49ca-9aec-fb055f63bbef","Type":"ContainerStarted","Data":"4e03c268f76439b6cf4a569cdf85b6cc258ad1971d8a7be68c845b589afadfec"} Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.406051 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"5cd6c80e4e67ecff2a6f820a015040521405176ecb9d262477505188cfd7482f"} Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.406096 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"b12a24cb0c28f70237e58f15049b394276f27b88c69317786f5e3acda3454e55"} Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.406264 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.434847 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.446618 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-x82xx" podStartSLOduration=10.446604008 podStartE2EDuration="10.446604008s" podCreationTimestamp="2026-01-29 06:37:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:39.444630526 +0000 UTC m=+151.116125093" watchObservedRunningTime="2026-01-29 06:37:39.446604008 +0000 UTC m=+151.118098565" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.475510 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.481361 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wvlgr"] Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.486771 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
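The entries above trace a single race: every MountVolume.MountDevice and UnmountVolume.TearDown for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 fails with "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers" and is re-queued with a 500ms durationBeforeRetry, until reconciler.go:161 picks up the plugin socket under /var/lib/kubelet/plugins_registry and csi_plugin.go validates and registers the driver at /var/lib/kubelet/plugins/csi-hostpath/csi.sock. From that point the pending operations resolve, and MountDevice itself is skipped because the driver does not advertise STAGE_UNSTAGE_VOLUME. A minimal sketch of that lookup-then-retry pattern, assuming a toy in-memory registry rather than kubelet's real plugin manager (illustrative only, not kubelet source):

package main

import (
	"fmt"
	"sync"
	"time"
)

// registry is a toy stand-in for kubelet's list of registered CSI drivers;
// lookups fail until register has run, which is exactly the window the
// errors above cover.
type registry struct {
	mu      sync.RWMutex
	drivers map[string]string // driver name -> endpoint socket
}

func (r *registry) register(name, endpoint string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.drivers[name] = endpoint
}

func (r *registry) lookup(name string) (string, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	ep, ok := r.drivers[name]
	if !ok {
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return ep, nil
}

func main() {
	reg := &registry{drivers: map[string]string{}}
	const driver = "kubevirt.io.hostpath-provisioner"
	const backoff = 500 * time.Millisecond // matches durationBeforeRetry above

	// Simulate the plugin registering its socket slightly later, the way the
	// -reg.sock file under plugins_registry shows up partway through the log.
	go func() {
		time.Sleep(1200 * time.Millisecond)
		reg.register(driver, "/var/lib/kubelet/plugins/csi-hostpath/csi.sock")
	}()

	for attempt := 1; ; attempt++ {
		ep, err := reg.lookup(driver)
		if err != nil {
			fmt.Printf("attempt %d: %v; no retries permitted until %s (durationBeforeRetry %s)\n",
				attempt, err, time.Now().Add(backoff).Format(time.RFC3339Nano), backoff)
			time.Sleep(backoff)
			continue
		}
		fmt.Printf("attempt %d: driver resolved, mounting via %s\n", attempt, ep)
		return
	}
}

Once register runs, the very next attempt resolves the endpoint, which is the same transition visible above between the 06:37:39.270 MountDevice failure and the 06:37:39.486 MountDevice success for image-registry-697d97f7c8-v4kqn.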
Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.486799 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.584547 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.587021 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-v4kqn\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") " pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.651600 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.770974 4861 patch_prober.go:28] interesting pod/router-default-5444994796-9qj5w container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 06:37:39 crc kubenswrapper[4861]: [-]has-synced failed: reason withheld Jan 29 06:37:39 crc kubenswrapper[4861]: [+]process-running ok Jan 29 06:37:39 crc kubenswrapper[4861]: healthz check failed Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.771039 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9qj5w" podUID="8f74e6b3-bf32-4188-9246-3e164a0857ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 06:37:39 crc kubenswrapper[4861]: I0129 06:37:39.852177 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v4kqn"] Jan 29 06:37:39 crc kubenswrapper[4861]: W0129 06:37:39.863205 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc66ab458_ce20_4c27_99d5_e328b6397bd4.slice/crio-3f0ac800c801eff6587047f590baf5296e1ac5756659a8ea23c6d8257a461c2c WatchSource:0}: Error finding container 3f0ac800c801eff6587047f590baf5296e1ac5756659a8ea23c6d8257a461c2c: Status 404 returned error can't find the container with id 3f0ac800c801eff6587047f590baf5296e1ac5756659a8ea23c6d8257a461c2c Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.294909 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-whxwx"] Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.301922 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.304868 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-whxwx"] Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.310595 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.395056 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-catalog-content\") pod \"redhat-marketplace-whxwx\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") " pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.395605 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72k8s\" (UniqueName: \"kubernetes.io/projected/77c754cb-8b6c-4c93-940e-fb17e49b51e6-kube-api-access-72k8s\") pod \"redhat-marketplace-whxwx\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") " pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.395683 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-utilities\") pod \"redhat-marketplace-whxwx\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") " pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.414954 4861 generic.go:334] "Generic (PLEG): container finished" podID="363fbc7b-db52-48a7-8789-c46f1304adfe" containerID="7a9311b7e2063a4865de8798cd3458416e2dd41fd9c3176245b1e58ea0c74236" exitCode=0 Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.415025 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvlgr" event={"ID":"363fbc7b-db52-48a7-8789-c46f1304adfe","Type":"ContainerDied","Data":"7a9311b7e2063a4865de8798cd3458416e2dd41fd9c3176245b1e58ea0c74236"} Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.415057 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvlgr" event={"ID":"363fbc7b-db52-48a7-8789-c46f1304adfe","Type":"ContainerStarted","Data":"b1f90b7e6225c61dab3ac188a516edecaac768a3402c4588634e8ac5dfe76213"} Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.416959 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ffff18c1-7a03-4b5d-bb83-402d2f9725f8","Type":"ContainerStarted","Data":"4b936c361801c5827c93c1dba647d3855fad2f42d5886cbdcc63d868c42d0350"} Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.416998 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ffff18c1-7a03-4b5d-bb83-402d2f9725f8","Type":"ContainerStarted","Data":"02974c88a59fdb44afc98e1cad4e0b137eafa7a7d58648b0957f97b7d4980af5"} Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.421908 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" 
event={"ID":"c66ab458-ce20-4c27-99d5-e328b6397bd4","Type":"ContainerStarted","Data":"8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781"} Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.421955 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" event={"ID":"c66ab458-ce20-4c27-99d5-e328b6397bd4","Type":"ContainerStarted","Data":"3f0ac800c801eff6587047f590baf5296e1ac5756659a8ea23c6d8257a461c2c"} Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.421987 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.423582 4861 generic.go:334] "Generic (PLEG): container finished" podID="df326277-c54e-44f7-87df-260579302ade" containerID="115906c810aa87aa41199745158f2545417434a336d4ae3b3748ef63359e94bb" exitCode=0 Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.423640 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cj66x" event={"ID":"df326277-c54e-44f7-87df-260579302ade","Type":"ContainerDied","Data":"115906c810aa87aa41199745158f2545417434a336d4ae3b3748ef63359e94bb"} Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.425491 4861 generic.go:334] "Generic (PLEG): container finished" podID="840bff63-3c46-4e8d-baa2-32315812df45" containerID="cd75d3b90bd93e3166e7cb14ec279d247afb5f53401fb999198b975610b288e0" exitCode=0 Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.425580 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b5hfw" event={"ID":"840bff63-3c46-4e8d-baa2-32315812df45","Type":"ContainerDied","Data":"cd75d3b90bd93e3166e7cb14ec279d247afb5f53401fb999198b975610b288e0"} Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.493864 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.493845311 podStartE2EDuration="2.493845311s" podCreationTimestamp="2026-01-29 06:37:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:40.49158858 +0000 UTC m=+152.163083137" watchObservedRunningTime="2026-01-29 06:37:40.493845311 +0000 UTC m=+152.165339868" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.497159 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-catalog-content\") pod \"redhat-marketplace-whxwx\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") " pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.497225 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72k8s\" (UniqueName: \"kubernetes.io/projected/77c754cb-8b6c-4c93-940e-fb17e49b51e6-kube-api-access-72k8s\") pod \"redhat-marketplace-whxwx\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") " pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.497243 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-utilities\") pod \"redhat-marketplace-whxwx\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") " 
pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.497657 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-utilities\") pod \"redhat-marketplace-whxwx\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") " pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.497864 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-catalog-content\") pod \"redhat-marketplace-whxwx\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") " pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.512573 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" podStartSLOduration=129.512554401 podStartE2EDuration="2m9.512554401s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:40.509819578 +0000 UTC m=+152.181314145" watchObservedRunningTime="2026-01-29 06:37:40.512554401 +0000 UTC m=+152.184048958" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.518791 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72k8s\" (UniqueName: \"kubernetes.io/projected/77c754cb-8b6c-4c93-940e-fb17e49b51e6-kube-api-access-72k8s\") pod \"redhat-marketplace-whxwx\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") " pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.630612 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.691659 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4chs9"] Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.692611 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.708799 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4chs9"] Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.770941 4861 patch_prober.go:28] interesting pod/router-default-5444994796-9qj5w container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 06:37:40 crc kubenswrapper[4861]: [-]has-synced failed: reason withheld Jan 29 06:37:40 crc kubenswrapper[4861]: [+]process-running ok Jan 29 06:37:40 crc kubenswrapper[4861]: healthz check failed Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.771001 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9qj5w" podUID="8f74e6b3-bf32-4188-9246-3e164a0857ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.801648 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-utilities\") pod \"redhat-marketplace-4chs9\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.802088 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-522q8\" (UniqueName: \"kubernetes.io/projected/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-kube-api-access-522q8\") pod \"redhat-marketplace-4chs9\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.802156 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-catalog-content\") pod \"redhat-marketplace-4chs9\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.887192 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-whxwx"] Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.902980 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-utilities\") pod \"redhat-marketplace-4chs9\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.903032 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-522q8\" (UniqueName: \"kubernetes.io/projected/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-kube-api-access-522q8\") pod \"redhat-marketplace-4chs9\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.903281 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-catalog-content\") pod 
\"redhat-marketplace-4chs9\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.903784 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-utilities\") pod \"redhat-marketplace-4chs9\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.903928 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-catalog-content\") pod \"redhat-marketplace-4chs9\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:37:40 crc kubenswrapper[4861]: I0129 06:37:40.922935 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-522q8\" (UniqueName: \"kubernetes.io/projected/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-kube-api-access-522q8\") pod \"redhat-marketplace-4chs9\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.026142 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.133447 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.447950 4861 generic.go:334] "Generic (PLEG): container finished" podID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" containerID="2cdab55f008f908014f320528609dd185e1599831f7c1d8fd05d449a5878af8f" exitCode=0 Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.448000 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-whxwx" event={"ID":"77c754cb-8b6c-4c93-940e-fb17e49b51e6","Type":"ContainerDied","Data":"2cdab55f008f908014f320528609dd185e1599831f7c1d8fd05d449a5878af8f"} Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.448051 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-whxwx" event={"ID":"77c754cb-8b6c-4c93-940e-fb17e49b51e6","Type":"ContainerStarted","Data":"8330dc22f386d4c3fcb513c9ef416971b9193dc6658c07445413c47913f633cd"} Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.453025 4861 generic.go:334] "Generic (PLEG): container finished" podID="ffff18c1-7a03-4b5d-bb83-402d2f9725f8" containerID="4b936c361801c5827c93c1dba647d3855fad2f42d5886cbdcc63d868c42d0350" exitCode=0 Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.453890 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ffff18c1-7a03-4b5d-bb83-402d2f9725f8","Type":"ContainerDied","Data":"4b936c361801c5827c93c1dba647d3855fad2f42d5886cbdcc63d868c42d0350"} Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.489318 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-z4rxt"] Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.490501 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.494475 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.511926 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4chs9"] Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.521835 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-z4rxt"] Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.526406 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.545837 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-q6lpc" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.613274 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9ssd\" (UniqueName: \"kubernetes.io/projected/6ee375d9-c78e-4c98-a04f-c004903dbf12-kube-api-access-z9ssd\") pod \"redhat-operators-z4rxt\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") " pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.619465 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-utilities\") pod \"redhat-operators-z4rxt\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") " pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.619523 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-catalog-content\") pod \"redhat-operators-z4rxt\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") " pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.721616 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-utilities\") pod \"redhat-operators-z4rxt\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") " pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.721657 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-catalog-content\") pod \"redhat-operators-z4rxt\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") " pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.721692 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9ssd\" (UniqueName: \"kubernetes.io/projected/6ee375d9-c78e-4c98-a04f-c004903dbf12-kube-api-access-z9ssd\") pod \"redhat-operators-z4rxt\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") " pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.722346 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-utilities\") pod \"redhat-operators-z4rxt\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") " pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.722542 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-catalog-content\") pod \"redhat-operators-z4rxt\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") " pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.738859 4861 patch_prober.go:28] interesting pod/downloads-7954f5f757-jng8m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.738913 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-jng8m" podUID="f7f7f0b2-fcab-4777-bd72-60bf1b3fede4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.738906 4861 patch_prober.go:28] interesting pod/downloads-7954f5f757-jng8m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.738984 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-jng8m" podUID="f7f7f0b2-fcab-4777-bd72-60bf1b3fede4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.774166 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9ssd\" (UniqueName: \"kubernetes.io/projected/6ee375d9-c78e-4c98-a04f-c004903dbf12-kube-api-access-z9ssd\") pod \"redhat-operators-z4rxt\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") " pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.787098 4861 patch_prober.go:28] interesting pod/router-default-5444994796-9qj5w container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 06:37:41 crc kubenswrapper[4861]: [-]has-synced failed: reason withheld Jan 29 06:37:41 crc kubenswrapper[4861]: [+]process-running ok Jan 29 06:37:41 crc kubenswrapper[4861]: healthz check failed Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.787233 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9qj5w" podUID="8f74e6b3-bf32-4188-9246-3e164a0857ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.820636 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.884594 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nwd2x"] Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.886270 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.920718 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nwd2x"] Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.972833 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.972871 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.975960 4861 patch_prober.go:28] interesting pod/console-f9d7485db-hrbzh container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 29 06:37:41 crc kubenswrapper[4861]: I0129 06:37:41.976010 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-hrbzh" podUID="3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" containerName="console" probeResult="failure" output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.029021 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-utilities\") pod \"redhat-operators-nwd2x\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.029172 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7vkv\" (UniqueName: \"kubernetes.io/projected/409ca36f-ff5f-45de-b030-84876d623a63-kube-api-access-h7vkv\") pod \"redhat-operators-nwd2x\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.029223 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-catalog-content\") pod \"redhat-operators-nwd2x\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.133635 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-utilities\") pod \"redhat-operators-nwd2x\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.133694 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7vkv\" (UniqueName: \"kubernetes.io/projected/409ca36f-ff5f-45de-b030-84876d623a63-kube-api-access-h7vkv\") pod 
\"redhat-operators-nwd2x\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.133730 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-catalog-content\") pod \"redhat-operators-nwd2x\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.134448 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-catalog-content\") pod \"redhat-operators-nwd2x\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.136758 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-utilities\") pod \"redhat-operators-nwd2x\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.176912 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7vkv\" (UniqueName: \"kubernetes.io/projected/409ca36f-ff5f-45de-b030-84876d623a63-kube-api-access-h7vkv\") pod \"redhat-operators-nwd2x\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.215190 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-z4rxt"] Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.281618 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.466552 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4rxt" event={"ID":"6ee375d9-c78e-4c98-a04f-c004903dbf12","Type":"ContainerStarted","Data":"1f89b31f4f951a938be6850891200332be3272cab6cb70843d30bb49ef62c0c5"} Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.474525 4861 generic.go:334] "Generic (PLEG): container finished" podID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" containerID="e74f85cd682393361d34566ddacb3d28c277b25dd648519eba1eab6cf5aeebf3" exitCode=0 Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.475917 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chs9" event={"ID":"65a563c5-87b1-4cc7-be2f-e8cdc6290eca","Type":"ContainerDied","Data":"e74f85cd682393361d34566ddacb3d28c277b25dd648519eba1eab6cf5aeebf3"} Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.475982 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chs9" event={"ID":"65a563c5-87b1-4cc7-be2f-e8cdc6290eca","Type":"ContainerStarted","Data":"ebe1c9fcec8542875a0018fc5cf1400dd063bd119fe55510ca1f8992bea6c252"} Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.766454 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.770046 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.793754 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nwd2x"] Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.794641 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.810559 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.954168 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kubelet-dir\") pod \"ffff18c1-7a03-4b5d-bb83-402d2f9725f8\" (UID: \"ffff18c1-7a03-4b5d-bb83-402d2f9725f8\") " Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.954241 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kube-api-access\") pod \"ffff18c1-7a03-4b5d-bb83-402d2f9725f8\" (UID: \"ffff18c1-7a03-4b5d-bb83-402d2f9725f8\") " Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.954491 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ffff18c1-7a03-4b5d-bb83-402d2f9725f8" (UID: "ffff18c1-7a03-4b5d-bb83-402d2f9725f8"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:37:42 crc kubenswrapper[4861]: I0129 06:37:42.963477 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ffff18c1-7a03-4b5d-bb83-402d2f9725f8" (UID: "ffff18c1-7a03-4b5d-bb83-402d2f9725f8"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.055957 4861 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.055987 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffff18c1-7a03-4b5d-bb83-402d2f9725f8-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.495030 4861 generic.go:334] "Generic (PLEG): container finished" podID="409ca36f-ff5f-45de-b030-84876d623a63" containerID="b7a504adc71e368a770f11261912ffd136e6c8710d8d467d9e4f02a19a212100" exitCode=0 Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.495120 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwd2x" event={"ID":"409ca36f-ff5f-45de-b030-84876d623a63","Type":"ContainerDied","Data":"b7a504adc71e368a770f11261912ffd136e6c8710d8d467d9e4f02a19a212100"} Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.495497 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwd2x" event={"ID":"409ca36f-ff5f-45de-b030-84876d623a63","Type":"ContainerStarted","Data":"917c74caffe555c84b86800855e0b7380f9f6d3447bdbee133376f826739cb72"} Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.498025 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.498091 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ffff18c1-7a03-4b5d-bb83-402d2f9725f8","Type":"ContainerDied","Data":"02974c88a59fdb44afc98e1cad4e0b137eafa7a7d58648b0957f97b7d4980af5"} Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.498115 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02974c88a59fdb44afc98e1cad4e0b137eafa7a7d58648b0957f97b7d4980af5" Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.516327 4861 generic.go:334] "Generic (PLEG): container finished" podID="cead6a1e-5df8-4937-9ee3-71efe555615f" containerID="e4b916c599c0cc7e6a0d63d8fb6a748a231e2a87702527f7c74b582ca5fb4435" exitCode=0 Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.516408 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" event={"ID":"cead6a1e-5df8-4937-9ee3-71efe555615f","Type":"ContainerDied","Data":"e4b916c599c0cc7e6a0d63d8fb6a748a231e2a87702527f7c74b582ca5fb4435"} Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.525123 4861 generic.go:334] "Generic (PLEG): container finished" podID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerID="1f17e4f79ebf610072f06f1725d3c6805c6bfa238c85aedf1d6eec6fd2de6189" exitCode=0 Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.525198 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4rxt" event={"ID":"6ee375d9-c78e-4c98-a04f-c004903dbf12","Type":"ContainerDied","Data":"1f17e4f79ebf610072f06f1725d3c6805c6bfa238c85aedf1d6eec6fd2de6189"} Jan 29 06:37:43 crc kubenswrapper[4861]: I0129 06:37:43.530066 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-9qj5w" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.476758 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 29 06:37:44 crc kubenswrapper[4861]: E0129 06:37:44.476954 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffff18c1-7a03-4b5d-bb83-402d2f9725f8" containerName="pruner" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.476965 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffff18c1-7a03-4b5d-bb83-402d2f9725f8" containerName="pruner" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.477057 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffff18c1-7a03-4b5d-bb83-402d2f9725f8" containerName="pruner" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.477504 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.480481 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.480680 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.529110 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.579262 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/16ec3227-64fb-42a1-8876-eb7773a03df9-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"16ec3227-64fb-42a1-8876-eb7773a03df9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.579649 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/16ec3227-64fb-42a1-8876-eb7773a03df9-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"16ec3227-64fb-42a1-8876-eb7773a03df9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.681641 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/16ec3227-64fb-42a1-8876-eb7773a03df9-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"16ec3227-64fb-42a1-8876-eb7773a03df9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.681784 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/16ec3227-64fb-42a1-8876-eb7773a03df9-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"16ec3227-64fb-42a1-8876-eb7773a03df9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.681895 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/16ec3227-64fb-42a1-8876-eb7773a03df9-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"16ec3227-64fb-42a1-8876-eb7773a03df9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.716083 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/16ec3227-64fb-42a1-8876-eb7773a03df9-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"16ec3227-64fb-42a1-8876-eb7773a03df9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.841331 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.844622 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.898286 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-wxx66" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.986303 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cead6a1e-5df8-4937-9ee3-71efe555615f-secret-volume\") pod \"cead6a1e-5df8-4937-9ee3-71efe555615f\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.986403 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rl96w\" (UniqueName: \"kubernetes.io/projected/cead6a1e-5df8-4937-9ee3-71efe555615f-kube-api-access-rl96w\") pod \"cead6a1e-5df8-4937-9ee3-71efe555615f\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.986447 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cead6a1e-5df8-4937-9ee3-71efe555615f-config-volume\") pod \"cead6a1e-5df8-4937-9ee3-71efe555615f\" (UID: \"cead6a1e-5df8-4937-9ee3-71efe555615f\") " Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.989327 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cead6a1e-5df8-4937-9ee3-71efe555615f-config-volume" (OuterVolumeSpecName: "config-volume") pod "cead6a1e-5df8-4937-9ee3-71efe555615f" (UID: "cead6a1e-5df8-4937-9ee3-71efe555615f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.995416 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cead6a1e-5df8-4937-9ee3-71efe555615f-kube-api-access-rl96w" (OuterVolumeSpecName: "kube-api-access-rl96w") pod "cead6a1e-5df8-4937-9ee3-71efe555615f" (UID: "cead6a1e-5df8-4937-9ee3-71efe555615f"). InnerVolumeSpecName "kube-api-access-rl96w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:37:44 crc kubenswrapper[4861]: I0129 06:37:44.995671 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cead6a1e-5df8-4937-9ee3-71efe555615f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cead6a1e-5df8-4937-9ee3-71efe555615f" (UID: "cead6a1e-5df8-4937-9ee3-71efe555615f"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:37:45 crc kubenswrapper[4861]: I0129 06:37:45.088464 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cead6a1e-5df8-4937-9ee3-71efe555615f-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 06:37:45 crc kubenswrapper[4861]: I0129 06:37:45.088516 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rl96w\" (UniqueName: \"kubernetes.io/projected/cead6a1e-5df8-4937-9ee3-71efe555615f-kube-api-access-rl96w\") on node \"crc\" DevicePath \"\"" Jan 29 06:37:45 crc kubenswrapper[4861]: I0129 06:37:45.088528 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cead6a1e-5df8-4937-9ee3-71efe555615f-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 06:37:45 crc kubenswrapper[4861]: I0129 06:37:45.314439 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 29 06:37:45 crc kubenswrapper[4861]: I0129 06:37:45.345084 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" Jan 29 06:37:45 crc kubenswrapper[4861]: I0129 06:37:45.555009 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"16ec3227-64fb-42a1-8876-eb7773a03df9","Type":"ContainerStarted","Data":"131a391a98e5566797c8a234491364e48a4ecc66bc45a63f8e75c872e6933479"} Jan 29 06:37:45 crc kubenswrapper[4861]: I0129 06:37:45.561777 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" event={"ID":"cead6a1e-5df8-4937-9ee3-71efe555615f","Type":"ContainerDied","Data":"64848bd9fbdd13a9af22fbdb989eb31339fc8d7ab2aaaad45f0e2978da015e80"} Jan 29 06:37:45 crc kubenswrapper[4861]: I0129 06:37:45.561883 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64848bd9fbdd13a9af22fbdb989eb31339fc8d7ab2aaaad45f0e2978da015e80" Jan 29 06:37:45 crc kubenswrapper[4861]: I0129 06:37:45.561886 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4" Jan 29 06:37:46 crc kubenswrapper[4861]: I0129 06:37:46.573881 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"16ec3227-64fb-42a1-8876-eb7773a03df9","Type":"ContainerStarted","Data":"63c16c6622bb694a92aa29ec1069b8a02795da81b9e1517d7440a95b88c28052"} Jan 29 06:37:46 crc kubenswrapper[4861]: I0129 06:37:46.591046 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.591022077 podStartE2EDuration="2.591022077s" podCreationTimestamp="2026-01-29 06:37:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:37:46.587878013 +0000 UTC m=+158.259372580" watchObservedRunningTime="2026-01-29 06:37:46.591022077 +0000 UTC m=+158.262516634" Jan 29 06:37:47 crc kubenswrapper[4861]: I0129 06:37:47.583230 4861 generic.go:334] "Generic (PLEG): container finished" podID="16ec3227-64fb-42a1-8876-eb7773a03df9" containerID="63c16c6622bb694a92aa29ec1069b8a02795da81b9e1517d7440a95b88c28052" exitCode=0 Jan 29 06:37:47 crc kubenswrapper[4861]: I0129 06:37:47.583296 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"16ec3227-64fb-42a1-8876-eb7773a03df9","Type":"ContainerDied","Data":"63c16c6622bb694a92aa29ec1069b8a02795da81b9e1517d7440a95b88c28052"} Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.568218 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.613707 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"16ec3227-64fb-42a1-8876-eb7773a03df9","Type":"ContainerDied","Data":"131a391a98e5566797c8a234491364e48a4ecc66bc45a63f8e75c872e6933479"} Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.613764 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="131a391a98e5566797c8a234491364e48a4ecc66bc45a63f8e75c872e6933479" Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.613827 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.725944 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/16ec3227-64fb-42a1-8876-eb7773a03df9-kube-api-access\") pod \"16ec3227-64fb-42a1-8876-eb7773a03df9\" (UID: \"16ec3227-64fb-42a1-8876-eb7773a03df9\") " Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.726546 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/16ec3227-64fb-42a1-8876-eb7773a03df9-kubelet-dir\") pod \"16ec3227-64fb-42a1-8876-eb7773a03df9\" (UID: \"16ec3227-64fb-42a1-8876-eb7773a03df9\") " Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.726677 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/16ec3227-64fb-42a1-8876-eb7773a03df9-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "16ec3227-64fb-42a1-8876-eb7773a03df9" (UID: "16ec3227-64fb-42a1-8876-eb7773a03df9"). 
InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.726955 4861 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/16ec3227-64fb-42a1-8876-eb7773a03df9-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.738389 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16ec3227-64fb-42a1-8876-eb7773a03df9-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "16ec3227-64fb-42a1-8876-eb7773a03df9" (UID: "16ec3227-64fb-42a1-8876-eb7773a03df9"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.744518 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-jng8m" Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.828642 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/16ec3227-64fb-42a1-8876-eb7773a03df9-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.978264 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:51 crc kubenswrapper[4861]: I0129 06:37:51.981489 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:37:54 crc kubenswrapper[4861]: I0129 06:37:53.998479 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:54 crc kubenswrapper[4861]: I0129 06:37:54.035541 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb22f8f6-1210-4f39-8712-d33efc26239c-metrics-certs\") pod \"network-metrics-daemon-rh69l\" (UID: \"fb22f8f6-1210-4f39-8712-d33efc26239c\") " pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:54 crc kubenswrapper[4861]: I0129 06:37:54.158407 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-rh69l" Jan 29 06:37:54 crc kubenswrapper[4861]: I0129 06:37:54.358293 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-rh69l"] Jan 29 06:37:54 crc kubenswrapper[4861]: W0129 06:37:54.367847 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb22f8f6_1210_4f39_8712_d33efc26239c.slice/crio-a35499d0a28cfd0cbe47600da8b44a4b0620c2060887952fda74d141667f1e23 WatchSource:0}: Error finding container a35499d0a28cfd0cbe47600da8b44a4b0620c2060887952fda74d141667f1e23: Status 404 returned error can't find the container with id a35499d0a28cfd0cbe47600da8b44a4b0620c2060887952fda74d141667f1e23 Jan 29 06:37:54 crc kubenswrapper[4861]: I0129 06:37:54.642586 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-rh69l" event={"ID":"fb22f8f6-1210-4f39-8712-d33efc26239c","Type":"ContainerStarted","Data":"a35499d0a28cfd0cbe47600da8b44a4b0620c2060887952fda74d141667f1e23"} Jan 29 06:37:55 crc kubenswrapper[4861]: I0129 06:37:55.665826 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-rh69l" event={"ID":"fb22f8f6-1210-4f39-8712-d33efc26239c","Type":"ContainerStarted","Data":"07928959be9990bc217070b9ca9b75fc630b869777f25a6c30e6063fdc35e0bc"} Jan 29 06:37:59 crc kubenswrapper[4861]: I0129 06:37:59.660711 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" Jan 29 06:38:00 crc kubenswrapper[4861]: I0129 06:38:00.629857 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:38:00 crc kubenswrapper[4861]: I0129 06:38:00.629978 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:38:12 crc kubenswrapper[4861]: I0129 06:38:12.816595 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gjhzk" Jan 29 06:38:15 crc kubenswrapper[4861]: E0129 06:38:15.192340 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 29 06:38:15 crc kubenswrapper[4861]: E0129 06:38:15.192851 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-72k8s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-whxwx_openshift-marketplace(77c754cb-8b6c-4c93-940e-fb17e49b51e6): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 06:38:15 crc kubenswrapper[4861]: E0129 06:38:15.194215 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-whxwx" podUID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" Jan 29 06:38:17 crc kubenswrapper[4861]: I0129 06:38:17.394307 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 06:38:18 crc kubenswrapper[4861]: E0129 06:38:18.827731 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-whxwx" podUID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" Jan 29 06:38:18 crc kubenswrapper[4861]: E0129 06:38:18.907173 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 29 06:38:18 crc kubenswrapper[4861]: E0129 06:38:18.907744 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z9ssd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-z4rxt_openshift-marketplace(6ee375d9-c78e-4c98-a04f-c004903dbf12): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 06:38:18 crc kubenswrapper[4861]: E0129 06:38:18.908920 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-z4rxt" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" Jan 29 06:38:18 crc kubenswrapper[4861]: E0129 06:38:18.912385 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 29 06:38:18 crc kubenswrapper[4861]: E0129 06:38:18.912463 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9c8m7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-b5hfw_openshift-marketplace(840bff63-3c46-4e8d-baa2-32315812df45): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 06:38:18 crc kubenswrapper[4861]: E0129 06:38:18.913599 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-b5hfw" podUID="840bff63-3c46-4e8d-baa2-32315812df45" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.145573 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-z4rxt" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.146258 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b5hfw" podUID="840bff63-3c46-4e8d-baa2-32315812df45" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.244198 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.244378 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-28sxd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-cj66x_openshift-marketplace(df326277-c54e-44f7-87df-260579302ade): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.245895 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-cj66x" podUID="df326277-c54e-44f7-87df-260579302ade" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.266235 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.266391 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2wlgs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-wvlgr_openshift-marketplace(363fbc7b-db52-48a7-8789-c46f1304adfe): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.267709 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-wvlgr" podUID="363fbc7b-db52-48a7-8789-c46f1304adfe" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.284528 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.284687 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rnktp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-79pqx_openshift-marketplace(7d4408c0-2666-4112-ba0d-c9427a29fc66): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.285968 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-79pqx" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.297199 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.297364 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-522q8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-4chs9_openshift-marketplace(65a563c5-87b1-4cc7-be2f-e8cdc6290eca): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.298557 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-4chs9" podUID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.841036 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-rh69l" event={"ID":"fb22f8f6-1210-4f39-8712-d33efc26239c","Type":"ContainerStarted","Data":"4fab1e868b08b4fe856aa60f736c104ac69ca31a12c543dee0426680ffdfeb01"} Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.843720 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwd2x" event={"ID":"409ca36f-ff5f-45de-b030-84876d623a63","Type":"ContainerStarted","Data":"186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953"} Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.847020 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-79pqx" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.847774 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-4chs9" podUID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.848643 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
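Each ErrImagePull above is followed by ImagePullBackOff: the kubelet does not retry a failed pull immediately but backs off exponentially per image, roughly doubling the delay from about 10s up to a 5-minute cap (treat those exact defaults as an assumption; the log itself only shows the back-off state). A stdlib sketch of that shape of policy:

package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the delay up to a cap, the general shape of the
// kubelet's per-image pull back-off (initial/cap values are assumptions).
func nextBackoff(cur, max time.Duration) time.Duration {
	if cur == 0 {
		return 10 * time.Second
	}
	if cur*2 > max {
		return max
	}
	return cur * 2
}

func main() {
	var d time.Duration
	for i := 0; i < 7; i++ {
		d = nextBackoff(d, 5*time.Minute)
		fmt.Printf("retry %d after %v\n", i+1, d) // 10s, 20s, 40s, ... capped at 5m0s
	}
}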
for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-wvlgr" podUID="363fbc7b-db52-48a7-8789-c46f1304adfe" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.851675 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-cj66x" podUID="df326277-c54e-44f7-87df-260579302ade" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.887660 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.888026 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cead6a1e-5df8-4937-9ee3-71efe555615f" containerName="collect-profiles" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.888044 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="cead6a1e-5df8-4937-9ee3-71efe555615f" containerName="collect-profiles" Jan 29 06:38:20 crc kubenswrapper[4861]: E0129 06:38:20.888057 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16ec3227-64fb-42a1-8876-eb7773a03df9" containerName="pruner" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.888064 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="16ec3227-64fb-42a1-8876-eb7773a03df9" containerName="pruner" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.888245 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="cead6a1e-5df8-4937-9ee3-71efe555615f" containerName="collect-profiles" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.888260 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="16ec3227-64fb-42a1-8876-eb7773a03df9" containerName="pruner" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.888727 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.894264 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.894619 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.895656 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-rh69l" podStartSLOduration=169.895626454 podStartE2EDuration="2m49.895626454s" podCreationTimestamp="2026-01-29 06:35:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:38:20.889606673 +0000 UTC m=+192.561101290" watchObservedRunningTime="2026-01-29 06:38:20.895626454 +0000 UTC m=+192.567121051" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.905622 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.938748 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 06:38:20 crc kubenswrapper[4861]: I0129 06:38:20.938996 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 06:38:21 crc kubenswrapper[4861]: I0129 06:38:21.041113 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 06:38:21 crc kubenswrapper[4861]: I0129 06:38:21.041222 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 06:38:21 crc kubenswrapper[4861]: I0129 06:38:21.041324 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 06:38:21 crc kubenswrapper[4861]: I0129 06:38:21.065146 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 
Jan 29 06:38:21 crc kubenswrapper[4861]: I0129 06:38:21.537666 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 29 06:38:21 crc kubenswrapper[4861]: I0129 06:38:21.849859 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896","Type":"ContainerStarted","Data":"151a7d6766ce597cd0bda776454249c795dbbde4a46bea11044e65c7c5178db9"}
Jan 29 06:38:21 crc kubenswrapper[4861]: I0129 06:38:21.854062 4861 generic.go:334] "Generic (PLEG): container finished" podID="409ca36f-ff5f-45de-b030-84876d623a63" containerID="186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953" exitCode=0
Jan 29 06:38:21 crc kubenswrapper[4861]: I0129 06:38:21.854168 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwd2x" event={"ID":"409ca36f-ff5f-45de-b030-84876d623a63","Type":"ContainerDied","Data":"186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953"}
Jan 29 06:38:22 crc kubenswrapper[4861]: I0129 06:38:22.864522 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896","Type":"ContainerStarted","Data":"da22977c492b9438e877fb8366c73ed5f217c41799c880d745c1830e2e258f6c"}
Jan 29 06:38:22 crc kubenswrapper[4861]: I0129 06:38:22.870387 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwd2x" event={"ID":"409ca36f-ff5f-45de-b030-84876d623a63","Type":"ContainerStarted","Data":"0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a"}
Jan 29 06:38:22 crc kubenswrapper[4861]: I0129 06:38:22.918946 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nwd2x" podStartSLOduration=3.149401845 podStartE2EDuration="41.918922826s" podCreationTimestamp="2026-01-29 06:37:41 +0000 UTC" firstStartedPulling="2026-01-29 06:37:43.545163852 +0000 UTC m=+155.216658409" lastFinishedPulling="2026-01-29 06:38:22.314684823 +0000 UTC m=+193.986179390" observedRunningTime="2026-01-29 06:38:22.913104491 +0000 UTC m=+194.584599058" watchObservedRunningTime="2026-01-29 06:38:22.918922826 +0000 UTC m=+194.590417403"
Jan 29 06:38:22 crc kubenswrapper[4861]: I0129 06:38:22.921015 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=2.920978871 podStartE2EDuration="2.920978871s" podCreationTimestamp="2026-01-29 06:38:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:38:22.88914819 +0000 UTC m=+194.560642757" watchObservedRunningTime="2026-01-29 06:38:22.920978871 +0000 UTC m=+194.592473448"
Jan 29 06:38:23 crc kubenswrapper[4861]: I0129 06:38:23.878443 4861 generic.go:334] "Generic (PLEG): container finished" podID="3cdc1dbd-9613-459b-a2e9-c3e52ccd3896" containerID="da22977c492b9438e877fb8366c73ed5f217c41799c880d745c1830e2e258f6c" exitCode=0
Jan 29 06:38:23 crc kubenswrapper[4861]: I0129 06:38:23.878700 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896","Type":"ContainerDied","Data":"da22977c492b9438e877fb8366c73ed5f217c41799c880d745c1830e2e258f6c"}
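The pod_startup_latency_tracker entries record two numbers: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, while podStartSLOduration additionally excludes the image pull window (lastFinishedPulling minus firstStartedPulling). The redhat-operators-nwd2x entry above checks out exactly: 41.918922826s end to end, minus a 38.769520981s pull window (m=+193.986179390 less m=+155.216658409), leaves the reported 3.149401845s; for the pruner pods the pull timestamps are zero, so the two numbers coincide. The arithmetic, with the monotonic offsets copied from the log:

package main

import "fmt"

func main() {
	// Monotonic clock offsets (the m=+... values) from the nwd2x entry above.
	firstStartedPulling := 155.216658409
	lastFinishedPulling := 193.986179390
	e2e := 41.918922826 // podStartE2EDuration in seconds

	pull := lastFinishedPulling - firstStartedPulling
	fmt.Printf("pull window:  %.9fs\n", pull)     // ~38.769520981s
	fmt.Printf("SLO duration: %.9fs\n", e2e-pull) // ~3.149401845s, as logged
}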
event={"ID":"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896","Type":"ContainerDied","Data":"da22977c492b9438e877fb8366c73ed5f217c41799c880d745c1830e2e258f6c"} Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.184008 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.210366 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kube-api-access\") pod \"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896\" (UID: \"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896\") " Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.210530 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kubelet-dir\") pod \"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896\" (UID: \"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896\") " Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.210897 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3cdc1dbd-9613-459b-a2e9-c3e52ccd3896" (UID: "3cdc1dbd-9613-459b-a2e9-c3e52ccd3896"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.219607 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3cdc1dbd-9613-459b-a2e9-c3e52ccd3896" (UID: "3cdc1dbd-9613-459b-a2e9-c3e52ccd3896"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.312642 4861 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.312698 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3cdc1dbd-9613-459b-a2e9-c3e52ccd3896-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.668467 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 06:38:25 crc kubenswrapper[4861]: E0129 06:38:25.668661 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cdc1dbd-9613-459b-a2e9-c3e52ccd3896" containerName="pruner" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.668673 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cdc1dbd-9613-459b-a2e9-c3e52ccd3896" containerName="pruner" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.668801 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cdc1dbd-9613-459b-a2e9-c3e52ccd3896" containerName="pruner" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.669136 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.690658 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.835483 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3643d6fc-457f-4932-9e89-d5ed6e176e45-kube-api-access\") pod \"installer-9-crc\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.835601 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.835651 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-var-lock\") pod \"installer-9-crc\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.891893 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"3cdc1dbd-9613-459b-a2e9-c3e52ccd3896","Type":"ContainerDied","Data":"151a7d6766ce597cd0bda776454249c795dbbde4a46bea11044e65c7c5178db9"} Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.891961 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="151a7d6766ce597cd0bda776454249c795dbbde4a46bea11044e65c7c5178db9" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.891917 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.936982 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.937056 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-var-lock\") pod \"installer-9-crc\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.937107 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3643d6fc-457f-4932-9e89-d5ed6e176e45-kube-api-access\") pod \"installer-9-crc\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.937172 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-var-lock\") pod \"installer-9-crc\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.937254 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.952860 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3643d6fc-457f-4932-9e89-d5ed6e176e45-kube-api-access\") pod \"installer-9-crc\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:38:25 crc kubenswrapper[4861]: I0129 06:38:25.987628 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:38:26 crc kubenswrapper[4861]: I0129 06:38:26.195798 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 06:38:26 crc kubenswrapper[4861]: I0129 06:38:26.898379 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3643d6fc-457f-4932-9e89-d5ed6e176e45","Type":"ContainerStarted","Data":"165bc480bc926601222881cf2cb69d5fabc891178afeb5f51c1dbc57d5133924"} Jan 29 06:38:26 crc kubenswrapper[4861]: I0129 06:38:26.898930 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3643d6fc-457f-4932-9e89-d5ed6e176e45","Type":"ContainerStarted","Data":"01275f49bff5b06940fb14dca99a57812f58df62d6ca3f8a847d6c04ee4c5549"} Jan 29 06:38:26 crc kubenswrapper[4861]: I0129 06:38:26.925911 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=1.9258839700000001 podStartE2EDuration="1.92588397s" podCreationTimestamp="2026-01-29 06:38:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:38:26.92363693 +0000 UTC m=+198.595131497" watchObservedRunningTime="2026-01-29 06:38:26.92588397 +0000 UTC m=+198.597378527" Jan 29 06:38:30 crc kubenswrapper[4861]: I0129 06:38:30.630479 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:38:30 crc kubenswrapper[4861]: I0129 06:38:30.630585 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:38:32 crc kubenswrapper[4861]: I0129 06:38:32.283527 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:38:32 crc kubenswrapper[4861]: I0129 06:38:32.284141 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:38:32 crc kubenswrapper[4861]: I0129 06:38:32.948103 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:38:33 crc kubenswrapper[4861]: I0129 06:38:33.023012 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:38:33 crc kubenswrapper[4861]: I0129 06:38:33.197819 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nwd2x"] Jan 29 06:38:33 crc kubenswrapper[4861]: I0129 06:38:33.944636 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-whxwx" event={"ID":"77c754cb-8b6c-4c93-940e-fb17e49b51e6","Type":"ContainerStarted","Data":"84ea732c3b8c40dab8109f2bd63b075ad7f6e6e85e52d71d890a8fdfb617faed"} Jan 29 06:38:34 crc kubenswrapper[4861]: I0129 06:38:34.954095 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-b5hfw" event={"ID":"840bff63-3c46-4e8d-baa2-32315812df45","Type":"ContainerStarted","Data":"f48abefc5f75182e4cdf18152fd284ad397fab3e6e5444144965d17064bebe2e"} Jan 29 06:38:34 crc kubenswrapper[4861]: I0129 06:38:34.957668 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvlgr" event={"ID":"363fbc7b-db52-48a7-8789-c46f1304adfe","Type":"ContainerStarted","Data":"5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336"} Jan 29 06:38:34 crc kubenswrapper[4861]: I0129 06:38:34.960100 4861 generic.go:334] "Generic (PLEG): container finished" podID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" containerID="84ea732c3b8c40dab8109f2bd63b075ad7f6e6e85e52d71d890a8fdfb617faed" exitCode=0 Jan 29 06:38:34 crc kubenswrapper[4861]: I0129 06:38:34.960217 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-whxwx" event={"ID":"77c754cb-8b6c-4c93-940e-fb17e49b51e6","Type":"ContainerDied","Data":"84ea732c3b8c40dab8109f2bd63b075ad7f6e6e85e52d71d890a8fdfb617faed"} Jan 29 06:38:34 crc kubenswrapper[4861]: I0129 06:38:34.962404 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chs9" event={"ID":"65a563c5-87b1-4cc7-be2f-e8cdc6290eca","Type":"ContainerStarted","Data":"ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c"} Jan 29 06:38:34 crc kubenswrapper[4861]: I0129 06:38:34.962633 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nwd2x" podUID="409ca36f-ff5f-45de-b030-84876d623a63" containerName="registry-server" containerID="cri-o://0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a" gracePeriod=2 Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.355935 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.488375 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-catalog-content\") pod \"409ca36f-ff5f-45de-b030-84876d623a63\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.488493 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-utilities\") pod \"409ca36f-ff5f-45de-b030-84876d623a63\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.488608 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7vkv\" (UniqueName: \"kubernetes.io/projected/409ca36f-ff5f-45de-b030-84876d623a63-kube-api-access-h7vkv\") pod \"409ca36f-ff5f-45de-b030-84876d623a63\" (UID: \"409ca36f-ff5f-45de-b030-84876d623a63\") " Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.489347 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-utilities" (OuterVolumeSpecName: "utilities") pod "409ca36f-ff5f-45de-b030-84876d623a63" (UID: "409ca36f-ff5f-45de-b030-84876d623a63"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.496223 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/409ca36f-ff5f-45de-b030-84876d623a63-kube-api-access-h7vkv" (OuterVolumeSpecName: "kube-api-access-h7vkv") pod "409ca36f-ff5f-45de-b030-84876d623a63" (UID: "409ca36f-ff5f-45de-b030-84876d623a63"). InnerVolumeSpecName "kube-api-access-h7vkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.590357 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.590387 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7vkv\" (UniqueName: \"kubernetes.io/projected/409ca36f-ff5f-45de-b030-84876d623a63-kube-api-access-h7vkv\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.635587 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "409ca36f-ff5f-45de-b030-84876d623a63" (UID: "409ca36f-ff5f-45de-b030-84876d623a63"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.691902 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/409ca36f-ff5f-45de-b030-84876d623a63-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.991512 4861 generic.go:334] "Generic (PLEG): container finished" podID="409ca36f-ff5f-45de-b030-84876d623a63" containerID="0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a" exitCode=0 Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.991646 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nwd2x" Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.991714 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwd2x" event={"ID":"409ca36f-ff5f-45de-b030-84876d623a63","Type":"ContainerDied","Data":"0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a"} Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.991772 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwd2x" event={"ID":"409ca36f-ff5f-45de-b030-84876d623a63","Type":"ContainerDied","Data":"917c74caffe555c84b86800855e0b7380f9f6d3447bdbee133376f826739cb72"} Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.991792 4861 scope.go:117] "RemoveContainer" containerID="0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a" Jan 29 06:38:35 crc kubenswrapper[4861]: I0129 06:38:35.997050 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4rxt" event={"ID":"6ee375d9-c78e-4c98-a04f-c004903dbf12","Type":"ContainerStarted","Data":"8cfa40fc5e3178de37dca99ead2ffb6916847fdd8db9dcc6fc6752ab01298cb9"} Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.002174 4861 generic.go:334] "Generic (PLEG): container finished" podID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" containerID="ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c" exitCode=0 Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.002223 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chs9" event={"ID":"65a563c5-87b1-4cc7-be2f-e8cdc6290eca","Type":"ContainerDied","Data":"ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c"} Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.007814 4861 generic.go:334] "Generic (PLEG): container finished" podID="840bff63-3c46-4e8d-baa2-32315812df45" containerID="f48abefc5f75182e4cdf18152fd284ad397fab3e6e5444144965d17064bebe2e" exitCode=0 Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.007988 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b5hfw" event={"ID":"840bff63-3c46-4e8d-baa2-32315812df45","Type":"ContainerDied","Data":"f48abefc5f75182e4cdf18152fd284ad397fab3e6e5444144965d17064bebe2e"} Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.011793 4861 generic.go:334] "Generic (PLEG): container finished" podID="363fbc7b-db52-48a7-8789-c46f1304adfe" containerID="5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336" exitCode=0 Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.011843 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvlgr" event={"ID":"363fbc7b-db52-48a7-8789-c46f1304adfe","Type":"ContainerDied","Data":"5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336"} Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.016762 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-whxwx" event={"ID":"77c754cb-8b6c-4c93-940e-fb17e49b51e6","Type":"ContainerStarted","Data":"398d8b4115f86cb01cfb2d60fb53e0cfcc25574d8a811f1e070d966a593d9d4c"} Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.066743 4861 scope.go:117] "RemoveContainer" containerID="186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953" Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.082276 4861 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-whxwx" podStartSLOduration=2.147573899 podStartE2EDuration="56.082255747s" podCreationTimestamp="2026-01-29 06:37:40 +0000 UTC" firstStartedPulling="2026-01-29 06:37:41.450344407 +0000 UTC m=+153.121838964" lastFinishedPulling="2026-01-29 06:38:35.385026235 +0000 UTC m=+207.056520812" observedRunningTime="2026-01-29 06:38:36.078965608 +0000 UTC m=+207.750460175" watchObservedRunningTime="2026-01-29 06:38:36.082255747 +0000 UTC m=+207.753750304" Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.095265 4861 scope.go:117] "RemoveContainer" containerID="b7a504adc71e368a770f11261912ffd136e6c8710d8d467d9e4f02a19a212100" Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.123196 4861 scope.go:117] "RemoveContainer" containerID="0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a" Jan 29 06:38:36 crc kubenswrapper[4861]: E0129 06:38:36.124164 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a\": container with ID starting with 0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a not found: ID does not exist" containerID="0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a" Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.124199 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a"} err="failed to get container status \"0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a\": rpc error: code = NotFound desc = could not find container \"0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a\": container with ID starting with 0c03a90881f9bae378104ec29b71272a477f7f3bb1b2bbccd2d8c29ab8bd2b5a not found: ID does not exist" Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.124251 4861 scope.go:117] "RemoveContainer" containerID="186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953" Jan 29 06:38:36 crc kubenswrapper[4861]: E0129 06:38:36.124580 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953\": container with ID starting with 186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953 not found: ID does not exist" containerID="186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953" Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.124617 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953"} err="failed to get container status \"186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953\": rpc error: code = NotFound desc = could not find container \"186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953\": container with ID starting with 186700d43f2ce9f0474756d4d41168ea829306339de126d88b99ad8e1436a953 not found: ID does not exist" Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.124636 4861 scope.go:117] "RemoveContainer" containerID="b7a504adc71e368a770f11261912ffd136e6c8710d8d467d9e4f02a19a212100" Jan 29 06:38:36 crc kubenswrapper[4861]: E0129 06:38:36.125009 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"b7a504adc71e368a770f11261912ffd136e6c8710d8d467d9e4f02a19a212100\": container with ID starting with b7a504adc71e368a770f11261912ffd136e6c8710d8d467d9e4f02a19a212100 not found: ID does not exist" containerID="b7a504adc71e368a770f11261912ffd136e6c8710d8d467d9e4f02a19a212100" Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.125058 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7a504adc71e368a770f11261912ffd136e6c8710d8d467d9e4f02a19a212100"} err="failed to get container status \"b7a504adc71e368a770f11261912ffd136e6c8710d8d467d9e4f02a19a212100\": rpc error: code = NotFound desc = could not find container \"b7a504adc71e368a770f11261912ffd136e6c8710d8d467d9e4f02a19a212100\": container with ID starting with b7a504adc71e368a770f11261912ffd136e6c8710d8d467d9e4f02a19a212100 not found: ID does not exist" Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.135149 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nwd2x"] Jan 29 06:38:36 crc kubenswrapper[4861]: I0129 06:38:36.141099 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nwd2x"] Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.023995 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerID="92c618353f925332b79e56974f98214c789477aabbf9d60eaa52cff716aed54b" exitCode=0 Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.024107 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79pqx" event={"ID":"7d4408c0-2666-4112-ba0d-c9427a29fc66","Type":"ContainerDied","Data":"92c618353f925332b79e56974f98214c789477aabbf9d60eaa52cff716aed54b"} Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.027625 4861 generic.go:334] "Generic (PLEG): container finished" podID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerID="8cfa40fc5e3178de37dca99ead2ffb6916847fdd8db9dcc6fc6752ab01298cb9" exitCode=0 Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.027691 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4rxt" event={"ID":"6ee375d9-c78e-4c98-a04f-c004903dbf12","Type":"ContainerDied","Data":"8cfa40fc5e3178de37dca99ead2ffb6916847fdd8db9dcc6fc6752ab01298cb9"} Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.027709 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4rxt" event={"ID":"6ee375d9-c78e-4c98-a04f-c004903dbf12","Type":"ContainerStarted","Data":"8e8ede30a4d4a538163d26a90f46dc759aa32cb317538af95b24ccbfa64656c6"} Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.031410 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chs9" event={"ID":"65a563c5-87b1-4cc7-be2f-e8cdc6290eca","Type":"ContainerStarted","Data":"edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6"} Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.033362 4861 generic.go:334] "Generic (PLEG): container finished" podID="df326277-c54e-44f7-87df-260579302ade" containerID="4c83e31a6d3126dfbc47b9656f32c446aba6418e489d2476598e2d65a1805b11" exitCode=0 Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.033440 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cj66x" 
event={"ID":"df326277-c54e-44f7-87df-260579302ade","Type":"ContainerDied","Data":"4c83e31a6d3126dfbc47b9656f32c446aba6418e489d2476598e2d65a1805b11"} Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.035712 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b5hfw" event={"ID":"840bff63-3c46-4e8d-baa2-32315812df45","Type":"ContainerStarted","Data":"053e49e4cdf2556eeb1448e3d207d941d25190edad7baf91d26d6cf6bf5884c3"} Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.039155 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvlgr" event={"ID":"363fbc7b-db52-48a7-8789-c46f1304adfe","Type":"ContainerStarted","Data":"8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734"} Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.066452 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-b5hfw" podStartSLOduration=2.994151394 podStartE2EDuration="59.066429863s" podCreationTimestamp="2026-01-29 06:37:38 +0000 UTC" firstStartedPulling="2026-01-29 06:37:40.426462418 +0000 UTC m=+152.097956975" lastFinishedPulling="2026-01-29 06:38:36.498740887 +0000 UTC m=+208.170235444" observedRunningTime="2026-01-29 06:38:37.06370982 +0000 UTC m=+208.735204387" watchObservedRunningTime="2026-01-29 06:38:37.066429863 +0000 UTC m=+208.737924420" Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.089813 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4chs9" podStartSLOduration=3.120285937 podStartE2EDuration="57.089791398s" podCreationTimestamp="2026-01-29 06:37:40 +0000 UTC" firstStartedPulling="2026-01-29 06:37:42.479891197 +0000 UTC m=+154.151385754" lastFinishedPulling="2026-01-29 06:38:36.449396658 +0000 UTC m=+208.120891215" observedRunningTime="2026-01-29 06:38:37.084796234 +0000 UTC m=+208.756290811" watchObservedRunningTime="2026-01-29 06:38:37.089791398 +0000 UTC m=+208.761285955" Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.122736 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="409ca36f-ff5f-45de-b030-84876d623a63" path="/var/lib/kubelet/pods/409ca36f-ff5f-45de-b030-84876d623a63/volumes" Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.133445 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-z4rxt" podStartSLOduration=3.101886694 podStartE2EDuration="56.133426975s" podCreationTimestamp="2026-01-29 06:37:41 +0000 UTC" firstStartedPulling="2026-01-29 06:37:43.528297481 +0000 UTC m=+155.199792038" lastFinishedPulling="2026-01-29 06:38:36.559837772 +0000 UTC m=+208.231332319" observedRunningTime="2026-01-29 06:38:37.131433572 +0000 UTC m=+208.802928129" watchObservedRunningTime="2026-01-29 06:38:37.133426975 +0000 UTC m=+208.804921532" Jan 29 06:38:37 crc kubenswrapper[4861]: I0129 06:38:37.159890 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wvlgr" podStartSLOduration=3.098230878 podStartE2EDuration="59.159869982s" podCreationTimestamp="2026-01-29 06:37:38 +0000 UTC" firstStartedPulling="2026-01-29 06:37:40.418406443 +0000 UTC m=+152.089901000" lastFinishedPulling="2026-01-29 06:38:36.480045547 +0000 UTC m=+208.151540104" observedRunningTime="2026-01-29 06:38:37.153214254 +0000 UTC m=+208.824708821" watchObservedRunningTime="2026-01-29 06:38:37.159869982 +0000 UTC 
m=+208.831364529" Jan 29 06:38:38 crc kubenswrapper[4861]: I0129 06:38:38.054517 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79pqx" event={"ID":"7d4408c0-2666-4112-ba0d-c9427a29fc66","Type":"ContainerStarted","Data":"90c8c54be52c4184fdb1c277adff8d4d4a01a80f606aacb12d029f2c69754db9"} Jan 29 06:38:38 crc kubenswrapper[4861]: I0129 06:38:38.059594 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cj66x" event={"ID":"df326277-c54e-44f7-87df-260579302ade","Type":"ContainerStarted","Data":"4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48"} Jan 29 06:38:38 crc kubenswrapper[4861]: I0129 06:38:38.075835 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-79pqx" podStartSLOduration=1.98166531 podStartE2EDuration="1m0.075809083s" podCreationTimestamp="2026-01-29 06:37:38 +0000 UTC" firstStartedPulling="2026-01-29 06:37:39.381063105 +0000 UTC m=+151.052557662" lastFinishedPulling="2026-01-29 06:38:37.475206878 +0000 UTC m=+209.146701435" observedRunningTime="2026-01-29 06:38:38.073794139 +0000 UTC m=+209.745288716" watchObservedRunningTime="2026-01-29 06:38:38.075809083 +0000 UTC m=+209.747303640" Jan 29 06:38:38 crc kubenswrapper[4861]: I0129 06:38:38.114345 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cj66x" podStartSLOduration=2.87965423 podStartE2EDuration="1m0.114318373s" podCreationTimestamp="2026-01-29 06:37:38 +0000 UTC" firstStartedPulling="2026-01-29 06:37:40.425449891 +0000 UTC m=+152.096944438" lastFinishedPulling="2026-01-29 06:38:37.660114024 +0000 UTC m=+209.331608581" observedRunningTime="2026-01-29 06:38:38.092291954 +0000 UTC m=+209.763786521" watchObservedRunningTime="2026-01-29 06:38:38.114318373 +0000 UTC m=+209.785812990" Jan 29 06:38:38 crc kubenswrapper[4861]: I0129 06:38:38.684181 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:38:38 crc kubenswrapper[4861]: I0129 06:38:38.684251 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:38:38 crc kubenswrapper[4861]: I0129 06:38:38.825583 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-b5hfw" Jan 29 06:38:38 crc kubenswrapper[4861]: I0129 06:38:38.825654 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-b5hfw" Jan 29 06:38:38 crc kubenswrapper[4861]: I0129 06:38:38.899210 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-b5hfw" Jan 29 06:38:39 crc kubenswrapper[4861]: I0129 06:38:39.010564 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:38:39 crc kubenswrapper[4861]: I0129 06:38:39.010622 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:38:39 crc kubenswrapper[4861]: I0129 06:38:39.207501 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:38:39 crc kubenswrapper[4861]: I0129 06:38:39.207547 4861 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:38:39 crc kubenswrapper[4861]: I0129 06:38:39.249998 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:38:39 crc kubenswrapper[4861]: I0129 06:38:39.726488 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-79pqx" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerName="registry-server" probeResult="failure" output=< Jan 29 06:38:39 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 06:38:39 crc kubenswrapper[4861]: > Jan 29 06:38:40 crc kubenswrapper[4861]: I0129 06:38:40.052496 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-cj66x" podUID="df326277-c54e-44f7-87df-260579302ade" containerName="registry-server" probeResult="failure" output=< Jan 29 06:38:40 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 06:38:40 crc kubenswrapper[4861]: > Jan 29 06:38:40 crc kubenswrapper[4861]: I0129 06:38:40.632023 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:38:40 crc kubenswrapper[4861]: I0129 06:38:40.632160 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:38:40 crc kubenswrapper[4861]: I0129 06:38:40.698256 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:38:41 crc kubenswrapper[4861]: I0129 06:38:41.027277 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:38:41 crc kubenswrapper[4861]: I0129 06:38:41.027913 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:38:41 crc kubenswrapper[4861]: I0129 06:38:41.094613 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:38:41 crc kubenswrapper[4861]: I0129 06:38:41.154879 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-whxwx" Jan 29 06:38:41 crc kubenswrapper[4861]: I0129 06:38:41.155336 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:38:41 crc kubenswrapper[4861]: I0129 06:38:41.821694 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:38:41 crc kubenswrapper[4861]: I0129 06:38:41.821795 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:38:42 crc kubenswrapper[4861]: I0129 06:38:42.865900 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-z4rxt" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerName="registry-server" probeResult="failure" output=< Jan 29 06:38:42 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 06:38:42 crc kubenswrapper[4861]: > Jan 29 06:38:43 crc kubenswrapper[4861]: I0129 06:38:43.596123 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-marketplace-4chs9"] Jan 29 06:38:43 crc kubenswrapper[4861]: I0129 06:38:43.596832 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4chs9" podUID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" containerName="registry-server" containerID="cri-o://edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6" gracePeriod=2 Jan 29 06:38:44 crc kubenswrapper[4861]: I0129 06:38:44.778621 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:38:44 crc kubenswrapper[4861]: I0129 06:38:44.956664 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-522q8\" (UniqueName: \"kubernetes.io/projected/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-kube-api-access-522q8\") pod \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " Jan 29 06:38:44 crc kubenswrapper[4861]: I0129 06:38:44.956879 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-utilities\") pod \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " Jan 29 06:38:44 crc kubenswrapper[4861]: I0129 06:38:44.956955 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-catalog-content\") pod \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\" (UID: \"65a563c5-87b1-4cc7-be2f-e8cdc6290eca\") " Jan 29 06:38:44 crc kubenswrapper[4861]: I0129 06:38:44.957945 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-utilities" (OuterVolumeSpecName: "utilities") pod "65a563c5-87b1-4cc7-be2f-e8cdc6290eca" (UID: "65a563c5-87b1-4cc7-be2f-e8cdc6290eca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:38:44 crc kubenswrapper[4861]: I0129 06:38:44.967320 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-kube-api-access-522q8" (OuterVolumeSpecName: "kube-api-access-522q8") pod "65a563c5-87b1-4cc7-be2f-e8cdc6290eca" (UID: "65a563c5-87b1-4cc7-be2f-e8cdc6290eca"). InnerVolumeSpecName "kube-api-access-522q8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:38:44 crc kubenswrapper[4861]: I0129 06:38:44.986484 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "65a563c5-87b1-4cc7-be2f-e8cdc6290eca" (UID: "65a563c5-87b1-4cc7-be2f-e8cdc6290eca"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.057950 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.057994 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.058018 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-522q8\" (UniqueName: \"kubernetes.io/projected/65a563c5-87b1-4cc7-be2f-e8cdc6290eca-kube-api-access-522q8\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.109299 4861 generic.go:334] "Generic (PLEG): container finished" podID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" containerID="edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6" exitCode=0 Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.109363 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4chs9" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.109407 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chs9" event={"ID":"65a563c5-87b1-4cc7-be2f-e8cdc6290eca","Type":"ContainerDied","Data":"edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6"} Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.109513 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4chs9" event={"ID":"65a563c5-87b1-4cc7-be2f-e8cdc6290eca","Type":"ContainerDied","Data":"ebe1c9fcec8542875a0018fc5cf1400dd063bd119fe55510ca1f8992bea6c252"} Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.109539 4861 scope.go:117] "RemoveContainer" containerID="edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.145301 4861 scope.go:117] "RemoveContainer" containerID="ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.176500 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4chs9"] Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.185943 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4chs9"] Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.196968 4861 scope.go:117] "RemoveContainer" containerID="e74f85cd682393361d34566ddacb3d28c277b25dd648519eba1eab6cf5aeebf3" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.225580 4861 scope.go:117] "RemoveContainer" containerID="edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6" Jan 29 06:38:45 crc kubenswrapper[4861]: E0129 06:38:45.226338 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6\": container with ID starting with edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6 not found: ID does not exist" containerID="edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.226398 4861 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6"} err="failed to get container status \"edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6\": rpc error: code = NotFound desc = could not find container \"edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6\": container with ID starting with edff0d9eb68a4421d53ab74a950a327b9a970ec379453a8c16b247eaaa640ff6 not found: ID does not exist" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.226442 4861 scope.go:117] "RemoveContainer" containerID="ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c" Jan 29 06:38:45 crc kubenswrapper[4861]: E0129 06:38:45.226968 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c\": container with ID starting with ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c not found: ID does not exist" containerID="ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.227006 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c"} err="failed to get container status \"ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c\": rpc error: code = NotFound desc = could not find container \"ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c\": container with ID starting with ff2e9a41a2add268d25651acdeaaee1bd6f7dabb68a1f24452222ad508d68d4c not found: ID does not exist" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.227022 4861 scope.go:117] "RemoveContainer" containerID="e74f85cd682393361d34566ddacb3d28c277b25dd648519eba1eab6cf5aeebf3" Jan 29 06:38:45 crc kubenswrapper[4861]: E0129 06:38:45.227320 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e74f85cd682393361d34566ddacb3d28c277b25dd648519eba1eab6cf5aeebf3\": container with ID starting with e74f85cd682393361d34566ddacb3d28c277b25dd648519eba1eab6cf5aeebf3 not found: ID does not exist" containerID="e74f85cd682393361d34566ddacb3d28c277b25dd648519eba1eab6cf5aeebf3" Jan 29 06:38:45 crc kubenswrapper[4861]: I0129 06:38:45.227341 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e74f85cd682393361d34566ddacb3d28c277b25dd648519eba1eab6cf5aeebf3"} err="failed to get container status \"e74f85cd682393361d34566ddacb3d28c277b25dd648519eba1eab6cf5aeebf3\": rpc error: code = NotFound desc = could not find container \"e74f85cd682393361d34566ddacb3d28c277b25dd648519eba1eab6cf5aeebf3\": container with ID starting with e74f85cd682393361d34566ddacb3d28c277b25dd648519eba1eab6cf5aeebf3 not found: ID does not exist" Jan 29 06:38:47 crc kubenswrapper[4861]: I0129 06:38:47.127724 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" path="/var/lib/kubelet/pods/65a563c5-87b1-4cc7-be2f-e8cdc6290eca/volumes" Jan 29 06:38:48 crc kubenswrapper[4861]: I0129 06:38:48.767015 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:38:48 crc kubenswrapper[4861]: I0129 06:38:48.844820 4861 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:38:48 crc kubenswrapper[4861]: I0129 06:38:48.929832 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-b5hfw" Jan 29 06:38:49 crc kubenswrapper[4861]: I0129 06:38:49.076229 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:38:49 crc kubenswrapper[4861]: I0129 06:38:49.147194 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:38:49 crc kubenswrapper[4861]: I0129 06:38:49.269302 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.195880 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cj66x"] Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.197797 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cj66x" podUID="df326277-c54e-44f7-87df-260579302ade" containerName="registry-server" containerID="cri-o://4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48" gracePeriod=2 Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.663676 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.767134 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-utilities\") pod \"df326277-c54e-44f7-87df-260579302ade\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.767287 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28sxd\" (UniqueName: \"kubernetes.io/projected/df326277-c54e-44f7-87df-260579302ade-kube-api-access-28sxd\") pod \"df326277-c54e-44f7-87df-260579302ade\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.767347 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-catalog-content\") pod \"df326277-c54e-44f7-87df-260579302ade\" (UID: \"df326277-c54e-44f7-87df-260579302ade\") " Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.768811 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-utilities" (OuterVolumeSpecName: "utilities") pod "df326277-c54e-44f7-87df-260579302ade" (UID: "df326277-c54e-44f7-87df-260579302ade"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.778374 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df326277-c54e-44f7-87df-260579302ade-kube-api-access-28sxd" (OuterVolumeSpecName: "kube-api-access-28sxd") pod "df326277-c54e-44f7-87df-260579302ade" (UID: "df326277-c54e-44f7-87df-260579302ade"). InnerVolumeSpecName "kube-api-access-28sxd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.824020 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-s46nf"] Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.842357 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "df326277-c54e-44f7-87df-260579302ade" (UID: "df326277-c54e-44f7-87df-260579302ade"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.869315 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28sxd\" (UniqueName: \"kubernetes.io/projected/df326277-c54e-44f7-87df-260579302ade-kube-api-access-28sxd\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.869620 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:50 crc kubenswrapper[4861]: I0129 06:38:50.869683 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df326277-c54e-44f7-87df-260579302ade-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.163272 4861 generic.go:334] "Generic (PLEG): container finished" podID="df326277-c54e-44f7-87df-260579302ade" containerID="4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48" exitCode=0 Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.163327 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cj66x" event={"ID":"df326277-c54e-44f7-87df-260579302ade","Type":"ContainerDied","Data":"4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48"} Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.163363 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cj66x" event={"ID":"df326277-c54e-44f7-87df-260579302ade","Type":"ContainerDied","Data":"96bbca63e8f71dc06c9bb61ee46c7e161fb53b97be104c7a39b7845cd7673005"} Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.163364 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cj66x" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.163388 4861 scope.go:117] "RemoveContainer" containerID="4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.188232 4861 scope.go:117] "RemoveContainer" containerID="4c83e31a6d3126dfbc47b9656f32c446aba6418e489d2476598e2d65a1805b11" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.190724 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cj66x"] Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.207356 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cj66x"] Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.216706 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wvlgr"] Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.216991 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wvlgr" podUID="363fbc7b-db52-48a7-8789-c46f1304adfe" containerName="registry-server" containerID="cri-o://8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734" gracePeriod=2 Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.225432 4861 scope.go:117] "RemoveContainer" containerID="115906c810aa87aa41199745158f2545417434a336d4ae3b3748ef63359e94bb" Jan 29 06:38:51 crc kubenswrapper[4861]: E0129 06:38:51.263345 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf326277_c54e_44f7_87df_260579302ade.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf326277_c54e_44f7_87df_260579302ade.slice/crio-96bbca63e8f71dc06c9bb61ee46c7e161fb53b97be104c7a39b7845cd7673005\": RecentStats: unable to find data in memory cache]" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.266784 4861 scope.go:117] "RemoveContainer" containerID="4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48" Jan 29 06:38:51 crc kubenswrapper[4861]: E0129 06:38:51.268680 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48\": container with ID starting with 4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48 not found: ID does not exist" containerID="4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.268767 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48"} err="failed to get container status \"4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48\": rpc error: code = NotFound desc = could not find container \"4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48\": container with ID starting with 4f67181f07a3fbd59e0db99d4e7c3cebfc651490e829a7894a2655bc54b63d48 not found: ID does not exist" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.268824 4861 scope.go:117] "RemoveContainer" containerID="4c83e31a6d3126dfbc47b9656f32c446aba6418e489d2476598e2d65a1805b11" Jan 29 06:38:51 crc kubenswrapper[4861]: E0129 06:38:51.269309 
4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c83e31a6d3126dfbc47b9656f32c446aba6418e489d2476598e2d65a1805b11\": container with ID starting with 4c83e31a6d3126dfbc47b9656f32c446aba6418e489d2476598e2d65a1805b11 not found: ID does not exist" containerID="4c83e31a6d3126dfbc47b9656f32c446aba6418e489d2476598e2d65a1805b11" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.269350 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c83e31a6d3126dfbc47b9656f32c446aba6418e489d2476598e2d65a1805b11"} err="failed to get container status \"4c83e31a6d3126dfbc47b9656f32c446aba6418e489d2476598e2d65a1805b11\": rpc error: code = NotFound desc = could not find container \"4c83e31a6d3126dfbc47b9656f32c446aba6418e489d2476598e2d65a1805b11\": container with ID starting with 4c83e31a6d3126dfbc47b9656f32c446aba6418e489d2476598e2d65a1805b11 not found: ID does not exist" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.269406 4861 scope.go:117] "RemoveContainer" containerID="115906c810aa87aa41199745158f2545417434a336d4ae3b3748ef63359e94bb" Jan 29 06:38:51 crc kubenswrapper[4861]: E0129 06:38:51.269675 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"115906c810aa87aa41199745158f2545417434a336d4ae3b3748ef63359e94bb\": container with ID starting with 115906c810aa87aa41199745158f2545417434a336d4ae3b3748ef63359e94bb not found: ID does not exist" containerID="115906c810aa87aa41199745158f2545417434a336d4ae3b3748ef63359e94bb" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.269698 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"115906c810aa87aa41199745158f2545417434a336d4ae3b3748ef63359e94bb"} err="failed to get container status \"115906c810aa87aa41199745158f2545417434a336d4ae3b3748ef63359e94bb\": rpc error: code = NotFound desc = could not find container \"115906c810aa87aa41199745158f2545417434a336d4ae3b3748ef63359e94bb\": container with ID starting with 115906c810aa87aa41199745158f2545417434a336d4ae3b3748ef63359e94bb not found: ID does not exist" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.614362 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.781478 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-catalog-content\") pod \"363fbc7b-db52-48a7-8789-c46f1304adfe\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.781665 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wlgs\" (UniqueName: \"kubernetes.io/projected/363fbc7b-db52-48a7-8789-c46f1304adfe-kube-api-access-2wlgs\") pod \"363fbc7b-db52-48a7-8789-c46f1304adfe\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.781706 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-utilities\") pod \"363fbc7b-db52-48a7-8789-c46f1304adfe\" (UID: \"363fbc7b-db52-48a7-8789-c46f1304adfe\") " Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.782773 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-utilities" (OuterVolumeSpecName: "utilities") pod "363fbc7b-db52-48a7-8789-c46f1304adfe" (UID: "363fbc7b-db52-48a7-8789-c46f1304adfe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.795787 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/363fbc7b-db52-48a7-8789-c46f1304adfe-kube-api-access-2wlgs" (OuterVolumeSpecName: "kube-api-access-2wlgs") pod "363fbc7b-db52-48a7-8789-c46f1304adfe" (UID: "363fbc7b-db52-48a7-8789-c46f1304adfe"). InnerVolumeSpecName "kube-api-access-2wlgs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.833918 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "363fbc7b-db52-48a7-8789-c46f1304adfe" (UID: "363fbc7b-db52-48a7-8789-c46f1304adfe"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.880094 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.883462 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wlgs\" (UniqueName: \"kubernetes.io/projected/363fbc7b-db52-48a7-8789-c46f1304adfe-kube-api-access-2wlgs\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.883532 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.883548 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/363fbc7b-db52-48a7-8789-c46f1304adfe-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:38:51 crc kubenswrapper[4861]: I0129 06:38:51.932991 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.172022 4861 generic.go:334] "Generic (PLEG): container finished" podID="363fbc7b-db52-48a7-8789-c46f1304adfe" containerID="8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734" exitCode=0 Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.172135 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wvlgr" Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.172152 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvlgr" event={"ID":"363fbc7b-db52-48a7-8789-c46f1304adfe","Type":"ContainerDied","Data":"8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734"} Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.172204 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wvlgr" event={"ID":"363fbc7b-db52-48a7-8789-c46f1304adfe","Type":"ContainerDied","Data":"b1f90b7e6225c61dab3ac188a516edecaac768a3402c4588634e8ac5dfe76213"} Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.172227 4861 scope.go:117] "RemoveContainer" containerID="8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734" Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.198711 4861 scope.go:117] "RemoveContainer" containerID="5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336" Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.223617 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wvlgr"] Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.234281 4861 scope.go:117] "RemoveContainer" containerID="7a9311b7e2063a4865de8798cd3458416e2dd41fd9c3176245b1e58ea0c74236" Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.237804 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wvlgr"] Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.268476 4861 scope.go:117] "RemoveContainer" containerID="8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734" Jan 29 06:38:52 crc kubenswrapper[4861]: E0129 06:38:52.270240 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = 
NotFound desc = could not find container \"8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734\": container with ID starting with 8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734 not found: ID does not exist" containerID="8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734" Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.270319 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734"} err="failed to get container status \"8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734\": rpc error: code = NotFound desc = could not find container \"8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734\": container with ID starting with 8e6dd9fb502120725568c2536d824669d465c9b3997cebdb8106734d97ad9734 not found: ID does not exist" Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.270359 4861 scope.go:117] "RemoveContainer" containerID="5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336" Jan 29 06:38:52 crc kubenswrapper[4861]: E0129 06:38:52.273473 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336\": container with ID starting with 5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336 not found: ID does not exist" containerID="5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336" Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.273518 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336"} err="failed to get container status \"5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336\": rpc error: code = NotFound desc = could not find container \"5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336\": container with ID starting with 5fac6df2c0dfe3a9afab6e908b1a74238823379b842e5f153f7a1039ca118336 not found: ID does not exist" Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.273533 4861 scope.go:117] "RemoveContainer" containerID="7a9311b7e2063a4865de8798cd3458416e2dd41fd9c3176245b1e58ea0c74236" Jan 29 06:38:52 crc kubenswrapper[4861]: E0129 06:38:52.277268 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a9311b7e2063a4865de8798cd3458416e2dd41fd9c3176245b1e58ea0c74236\": container with ID starting with 7a9311b7e2063a4865de8798cd3458416e2dd41fd9c3176245b1e58ea0c74236 not found: ID does not exist" containerID="7a9311b7e2063a4865de8798cd3458416e2dd41fd9c3176245b1e58ea0c74236" Jan 29 06:38:52 crc kubenswrapper[4861]: I0129 06:38:52.277296 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a9311b7e2063a4865de8798cd3458416e2dd41fd9c3176245b1e58ea0c74236"} err="failed to get container status \"7a9311b7e2063a4865de8798cd3458416e2dd41fd9c3176245b1e58ea0c74236\": rpc error: code = NotFound desc = could not find container \"7a9311b7e2063a4865de8798cd3458416e2dd41fd9c3176245b1e58ea0c74236\": container with ID starting with 7a9311b7e2063a4865de8798cd3458416e2dd41fd9c3176245b1e58ea0c74236 not found: ID does not exist" Jan 29 06:38:53 crc kubenswrapper[4861]: I0129 06:38:53.129694 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="363fbc7b-db52-48a7-8789-c46f1304adfe" path="/var/lib/kubelet/pods/363fbc7b-db52-48a7-8789-c46f1304adfe/volumes" Jan 29 06:38:53 crc kubenswrapper[4861]: I0129 06:38:53.131528 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df326277-c54e-44f7-87df-260579302ade" path="/var/lib/kubelet/pods/df326277-c54e-44f7-87df-260579302ade/volumes" Jan 29 06:39:00 crc kubenswrapper[4861]: I0129 06:39:00.629690 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:39:00 crc kubenswrapper[4861]: I0129 06:39:00.630810 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:39:00 crc kubenswrapper[4861]: I0129 06:39:00.630898 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:39:00 crc kubenswrapper[4861]: I0129 06:39:00.631895 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 06:39:00 crc kubenswrapper[4861]: I0129 06:39:00.632000 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3" gracePeriod=600 Jan 29 06:39:01 crc kubenswrapper[4861]: I0129 06:39:01.249941 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3" exitCode=0 Jan 29 06:39:01 crc kubenswrapper[4861]: I0129 06:39:01.250648 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3"} Jan 29 06:39:01 crc kubenswrapper[4861]: I0129 06:39:01.250701 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"36705bd8ad43bcb9108d63a8d54cd293b1f2d38e6068c9bf489c044dc50abf8d"} Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.320173 4861 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.320884 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="409ca36f-ff5f-45de-b030-84876d623a63" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.320908 4861 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="409ca36f-ff5f-45de-b030-84876d623a63" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.320927 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="363fbc7b-db52-48a7-8789-c46f1304adfe" containerName="extract-utilities" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.320941 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="363fbc7b-db52-48a7-8789-c46f1304adfe" containerName="extract-utilities" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.320960 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df326277-c54e-44f7-87df-260579302ade" containerName="extract-content" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.320977 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="df326277-c54e-44f7-87df-260579302ade" containerName="extract-content" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.320996 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="363fbc7b-db52-48a7-8789-c46f1304adfe" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321009 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="363fbc7b-db52-48a7-8789-c46f1304adfe" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.321028 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="409ca36f-ff5f-45de-b030-84876d623a63" containerName="extract-content" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321041 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="409ca36f-ff5f-45de-b030-84876d623a63" containerName="extract-content" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.321061 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="409ca36f-ff5f-45de-b030-84876d623a63" containerName="extract-utilities" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321104 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="409ca36f-ff5f-45de-b030-84876d623a63" containerName="extract-utilities" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.321126 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" containerName="extract-content" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321140 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" containerName="extract-content" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.321162 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" containerName="extract-utilities" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321178 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" containerName="extract-utilities" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.321194 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df326277-c54e-44f7-87df-260579302ade" containerName="extract-utilities" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321209 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="df326277-c54e-44f7-87df-260579302ade" containerName="extract-utilities" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.321226 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="363fbc7b-db52-48a7-8789-c46f1304adfe" containerName="extract-content" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 
06:39:04.321240 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="363fbc7b-db52-48a7-8789-c46f1304adfe" containerName="extract-content" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.321258 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df326277-c54e-44f7-87df-260579302ade" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321272 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="df326277-c54e-44f7-87df-260579302ade" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.321289 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321302 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321494 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="df326277-c54e-44f7-87df-260579302ade" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321526 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="65a563c5-87b1-4cc7-be2f-e8cdc6290eca" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321542 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="409ca36f-ff5f-45de-b030-84876d623a63" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.321563 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="363fbc7b-db52-48a7-8789-c46f1304adfe" containerName="registry-server" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.322172 4861 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.322356 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.322702 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39" gracePeriod=15 Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.322768 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381" gracePeriod=15 Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.322880 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e" gracePeriod=15 Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.322906 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd" gracePeriod=15 Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.322886 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f" gracePeriod=15 Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.323887 4861 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.324216 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324230 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.324242 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324252 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.324266 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324274 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.324283 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-insecure-readyz" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324291 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.324305 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324313 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.324330 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324339 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.324352 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324360 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324491 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324508 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324521 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324538 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324554 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.324571 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.403550 4861 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.80:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.415805 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 
06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.415861 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.415894 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.416025 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.416107 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.416180 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.416217 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.416343 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517095 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517137 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517166 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517184 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517209 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517228 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517281 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517303 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517359 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517403 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517448 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc 
kubenswrapper[4861]: I0129 06:39:04.517471 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517503 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517506 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517519 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.517537 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: I0129 06:39:04.705514 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:04 crc kubenswrapper[4861]: W0129 06:39:04.733631 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-8765e68a739876557e8f9cb6ae7354dee4f119a7fad2dfe6bdd20b2df706dd2b WatchSource:0}: Error finding container 8765e68a739876557e8f9cb6ae7354dee4f119a7fad2dfe6bdd20b2df706dd2b: Status 404 returned error can't find the container with id 8765e68a739876557e8f9cb6ae7354dee4f119a7fad2dfe6bdd20b2df706dd2b Jan 29 06:39:04 crc kubenswrapper[4861]: E0129 06:39:04.736687 4861 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.80:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f2058e41ac0a9 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 06:39:04.736063657 +0000 UTC m=+236.407558254,LastTimestamp:2026-01-29 06:39:04.736063657 +0000 UTC m=+236.407558254,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 06:39:05.284112 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 06:39:05.286919 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 06:39:05.287975 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e" exitCode=0 Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 06:39:05.288021 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381" exitCode=0 Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 06:39:05.288036 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f" exitCode=0 Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 06:39:05.288054 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd" exitCode=2 Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 06:39:05.288190 4861 scope.go:117] "RemoveContainer" containerID="56e8816a20eaf9e19386544d5d8f2139e391bfba85e2f88d8d07e57505241515" Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 
06:39:05.290922 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"a96fc85349ef18db88384fa595ca86e8ca949d305c36b2be39c2dd8ad3532a46"} Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 06:39:05.291012 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"8765e68a739876557e8f9cb6ae7354dee4f119a7fad2dfe6bdd20b2df706dd2b"} Jan 29 06:39:05 crc kubenswrapper[4861]: E0129 06:39:05.292827 4861 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.80:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 06:39:05.293434 4861 generic.go:334] "Generic (PLEG): container finished" podID="3643d6fc-457f-4932-9e89-d5ed6e176e45" containerID="165bc480bc926601222881cf2cb69d5fabc891178afeb5f51c1dbc57d5133924" exitCode=0 Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 06:39:05.293506 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3643d6fc-457f-4932-9e89-d5ed6e176e45","Type":"ContainerDied","Data":"165bc480bc926601222881cf2cb69d5fabc891178afeb5f51c1dbc57d5133924"} Jan 29 06:39:05 crc kubenswrapper[4861]: I0129 06:39:05.294530 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:05 crc kubenswrapper[4861]: E0129 06:39:05.395932 4861 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.80:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f2058e41ac0a9 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 06:39:04.736063657 +0000 UTC m=+236.407558254,LastTimestamp:2026-01-29 06:39:04.736063657 +0000 UTC m=+236.407558254,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.304586 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.755842 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.756913 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.761958 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.762836 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.763374 4861 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.763696 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.768313 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-kubelet-dir\") pod \"3643d6fc-457f-4932-9e89-d5ed6e176e45\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.768371 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.768439 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-var-lock\") pod \"3643d6fc-457f-4932-9e89-d5ed6e176e45\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.768447 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3643d6fc-457f-4932-9e89-d5ed6e176e45" (UID: "3643d6fc-457f-4932-9e89-d5ed6e176e45"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.768507 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.768541 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.768592 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3643d6fc-457f-4932-9e89-d5ed6e176e45-kube-api-access\") pod \"3643d6fc-457f-4932-9e89-d5ed6e176e45\" (UID: \"3643d6fc-457f-4932-9e89-d5ed6e176e45\") " Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.768622 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-var-lock" (OuterVolumeSpecName: "var-lock") pod "3643d6fc-457f-4932-9e89-d5ed6e176e45" (UID: "3643d6fc-457f-4932-9e89-d5ed6e176e45"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.768664 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.768673 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.768770 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.769292 4861 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-var-lock\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.769352 4861 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.769372 4861 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.769390 4861 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3643d6fc-457f-4932-9e89-d5ed6e176e45-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.769406 4861 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.774006 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3643d6fc-457f-4932-9e89-d5ed6e176e45-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3643d6fc-457f-4932-9e89-d5ed6e176e45" (UID: "3643d6fc-457f-4932-9e89-d5ed6e176e45"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:39:06 crc kubenswrapper[4861]: I0129 06:39:06.871590 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3643d6fc-457f-4932-9e89-d5ed6e176e45-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.127204 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.319664 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.321170 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39" exitCode=0 Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.321325 4861 scope.go:117] "RemoveContainer" containerID="87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.321326 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.322362 4861 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.323315 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.324345 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3643d6fc-457f-4932-9e89-d5ed6e176e45","Type":"ContainerDied","Data":"01275f49bff5b06940fb14dca99a57812f58df62d6ca3f8a847d6c04ee4c5549"} Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.324387 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="01275f49bff5b06940fb14dca99a57812f58df62d6ca3f8a847d6c04ee4c5549" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.324452 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.328328 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.330121 4861 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.335972 4861 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.336558 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.355373 4861 scope.go:117] "RemoveContainer" containerID="1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.385606 4861 scope.go:117] "RemoveContainer" containerID="f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.415125 4861 scope.go:117] 
"RemoveContainer" containerID="79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.446864 4861 scope.go:117] "RemoveContainer" containerID="6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.479589 4861 scope.go:117] "RemoveContainer" containerID="88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.525622 4861 scope.go:117] "RemoveContainer" containerID="87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e" Jan 29 06:39:07 crc kubenswrapper[4861]: E0129 06:39:07.526975 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\": container with ID starting with 87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e not found: ID does not exist" containerID="87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.527042 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e"} err="failed to get container status \"87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\": rpc error: code = NotFound desc = could not find container \"87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e\": container with ID starting with 87e68c66d75f1849a95f50b26193d0997f2fe7e13f8c608803a3814f6f687a1e not found: ID does not exist" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.527131 4861 scope.go:117] "RemoveContainer" containerID="1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381" Jan 29 06:39:07 crc kubenswrapper[4861]: E0129 06:39:07.527831 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\": container with ID starting with 1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381 not found: ID does not exist" containerID="1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.527919 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381"} err="failed to get container status \"1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\": rpc error: code = NotFound desc = could not find container \"1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381\": container with ID starting with 1a9ac32f9b1fb55546dc14ef6a8e50d0f79fd9bd30f360e438c2acecbbdc5381 not found: ID does not exist" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.527974 4861 scope.go:117] "RemoveContainer" containerID="f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f" Jan 29 06:39:07 crc kubenswrapper[4861]: E0129 06:39:07.528515 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\": container with ID starting with f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f not found: ID does not exist" 
containerID="f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.528580 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f"} err="failed to get container status \"f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\": rpc error: code = NotFound desc = could not find container \"f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f\": container with ID starting with f60bde48082626313ed8be7b13f3c4a3b9239b704e77a2d75afb6689e2818b9f not found: ID does not exist" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.528622 4861 scope.go:117] "RemoveContainer" containerID="79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd" Jan 29 06:39:07 crc kubenswrapper[4861]: E0129 06:39:07.529914 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\": container with ID starting with 79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd not found: ID does not exist" containerID="79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.529984 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd"} err="failed to get container status \"79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\": rpc error: code = NotFound desc = could not find container \"79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd\": container with ID starting with 79e022adafa8840949dfe3fe497c7b42c62b337143b4f429559d936e340052fd not found: ID does not exist" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.530038 4861 scope.go:117] "RemoveContainer" containerID="6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39" Jan 29 06:39:07 crc kubenswrapper[4861]: E0129 06:39:07.530773 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\": container with ID starting with 6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39 not found: ID does not exist" containerID="6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.530826 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39"} err="failed to get container status \"6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\": rpc error: code = NotFound desc = could not find container \"6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39\": container with ID starting with 6093e0f1ec08b588f76952cf4c46ba7c6d0e0558bfc00bf8b0def403e73fbe39 not found: ID does not exist" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.530874 4861 scope.go:117] "RemoveContainer" containerID="88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860" Jan 29 06:39:07 crc kubenswrapper[4861]: E0129 06:39:07.532117 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\": container with ID starting with 88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860 not found: ID does not exist" containerID="88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860" Jan 29 06:39:07 crc kubenswrapper[4861]: I0129 06:39:07.532178 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860"} err="failed to get container status \"88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\": rpc error: code = NotFound desc = could not find container \"88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860\": container with ID starting with 88edc4da2743d8564c47abdd9e4d6f6b98f371550e66099044e7ff3f1b62b860 not found: ID does not exist" Jan 29 06:39:09 crc kubenswrapper[4861]: I0129 06:39:09.121626 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:09 crc kubenswrapper[4861]: I0129 06:39:09.122351 4861 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:09 crc kubenswrapper[4861]: E0129 06:39:09.725880 4861 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:09 crc kubenswrapper[4861]: E0129 06:39:09.726443 4861 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:09 crc kubenswrapper[4861]: E0129 06:39:09.727013 4861 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:09 crc kubenswrapper[4861]: E0129 06:39:09.727948 4861 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:09 crc kubenswrapper[4861]: E0129 06:39:09.728507 4861 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:09 crc kubenswrapper[4861]: I0129 06:39:09.728534 4861 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 29 06:39:09 crc kubenswrapper[4861]: E0129 06:39:09.728753 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="200ms" Jan 29 06:39:09 crc kubenswrapper[4861]: E0129 06:39:09.930280 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="400ms" Jan 29 06:39:10 crc kubenswrapper[4861]: E0129 06:39:10.336111 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="800ms" Jan 29 06:39:11 crc kubenswrapper[4861]: E0129 06:39:11.137295 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="1.6s" Jan 29 06:39:12 crc kubenswrapper[4861]: E0129 06:39:12.739391 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="3.2s" Jan 29 06:39:15 crc kubenswrapper[4861]: E0129 06:39:15.396627 4861 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.80:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f2058e41ac0a9 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 06:39:04.736063657 +0000 UTC m=+236.407558254,LastTimestamp:2026-01-29 06:39:04.736063657 +0000 UTC m=+236.407558254,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 06:39:15 crc kubenswrapper[4861]: I0129 06:39:15.857333 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" podUID="407f7505-8386-467a-9b71-e1aea70b9c3d" containerName="oauth-openshift" containerID="cri-o://440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010" gracePeriod=15 Jan 29 06:39:15 crc kubenswrapper[4861]: E0129 06:39:15.940851 4861 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.80:6443: connect: connection refused" interval="6.4s" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.116460 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.117964 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.135150 4861 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="fead1b79-d9e2-4342-8ec9-039fd63d5a38" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.135207 4861 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="fead1b79-d9e2-4342-8ec9-039fd63d5a38" Jan 29 06:39:16 crc kubenswrapper[4861]: E0129 06:39:16.135965 4861 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.136969 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.316733 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.317644 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.318227 4861 status_manager.go:851] "Failed to get status for pod" podUID="407f7505-8386-467a-9b71-e1aea70b9c3d" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-s46nf\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.405448 4861 generic.go:334] "Generic (PLEG): container finished" podID="407f7505-8386-467a-9b71-e1aea70b9c3d" containerID="440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010" exitCode=0 Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.405595 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.405670 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" event={"ID":"407f7505-8386-467a-9b71-e1aea70b9c3d","Type":"ContainerDied","Data":"440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010"} Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.405721 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" event={"ID":"407f7505-8386-467a-9b71-e1aea70b9c3d","Type":"ContainerDied","Data":"49eb4def00759e3ed82b1f5bd83119cf9b792cddccf7241eb97ca90e3759788f"} Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.405799 4861 scope.go:117] "RemoveContainer" containerID="440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.406661 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.406967 4861 status_manager.go:851] "Failed to get status for pod" podUID="407f7505-8386-467a-9b71-e1aea70b9c3d" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-s46nf\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.407683 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fd9a4a6c60830b9308d5bb75b62480ebdaa8a1da57109a3fd9e65803671bff30"} Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.426705 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-policies\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.426766 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-ocp-branding-template\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.426810 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-idp-0-file-data\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.426829 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-router-certs\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: 
\"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.426870 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-serving-cert\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.426893 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-dir\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.426911 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-service-ca\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.426959 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-trusted-ca-bundle\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.426985 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-cliconfig\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.427010 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-provider-selection\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.427035 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59ktg\" (UniqueName: \"kubernetes.io/projected/407f7505-8386-467a-9b71-e1aea70b9c3d-kube-api-access-59ktg\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.427065 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-login\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.427109 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-error\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.427131 4861 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-session\") pod \"407f7505-8386-467a-9b71-e1aea70b9c3d\" (UID: \"407f7505-8386-467a-9b71-e1aea70b9c3d\") " Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.427744 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.428056 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.428665 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.429777 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.431277 4861 scope.go:117] "RemoveContainer" containerID="440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010" Jan 29 06:39:16 crc kubenswrapper[4861]: E0129 06:39:16.432975 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010\": container with ID starting with 440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010 not found: ID does not exist" containerID="440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.433025 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010"} err="failed to get container status \"440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010\": rpc error: code = NotFound desc = could not find container \"440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010\": container with ID starting with 440a563274e164bae7ad04483c89486143b0c5488cf2b44ef63753b39bc28010 not found: ID does not exist" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.433711 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.434934 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.434954 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.435177 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.435567 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/407f7505-8386-467a-9b71-e1aea70b9c3d-kube-api-access-59ktg" (OuterVolumeSpecName: "kube-api-access-59ktg") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "kube-api-access-59ktg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.435787 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.436006 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.436260 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.436900 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.437296 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "407f7505-8386-467a-9b71-e1aea70b9c3d" (UID: "407f7505-8386-467a-9b71-e1aea70b9c3d"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528392 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528423 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528436 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59ktg\" (UniqueName: \"kubernetes.io/projected/407f7505-8386-467a-9b71-e1aea70b9c3d-kube-api-access-59ktg\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528445 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528456 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528465 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528474 4861 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528483 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528491 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528524 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528533 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528542 4861 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/407f7505-8386-467a-9b71-e1aea70b9c3d-audit-dir\") on 
node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528552 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.528562 4861 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/407f7505-8386-467a-9b71-e1aea70b9c3d-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.725222 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:16 crc kubenswrapper[4861]: I0129 06:39:16.725657 4861 status_manager.go:851] "Failed to get status for pod" podUID="407f7505-8386-467a-9b71-e1aea70b9c3d" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-s46nf\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.421395 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.421468 4861 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0" exitCode=1 Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.421554 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0"} Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.422225 4861 scope.go:117] "RemoveContainer" containerID="7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0" Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.422693 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.423345 4861 status_manager.go:851] "Failed to get status for pod" podUID="407f7505-8386-467a-9b71-e1aea70b9c3d" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-s46nf\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.423758 4861 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.425446 4861 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="8e1506da704dbdb68143126257d07369cc08f6cee01cffcac31258ceca14ea6c" exitCode=0 Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.425531 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"8e1506da704dbdb68143126257d07369cc08f6cee01cffcac31258ceca14ea6c"} Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.425939 4861 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="fead1b79-d9e2-4342-8ec9-039fd63d5a38" Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.426032 4861 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="fead1b79-d9e2-4342-8ec9-039fd63d5a38" Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.426212 4861 status_manager.go:851] "Failed to get status for pod" podUID="407f7505-8386-467a-9b71-e1aea70b9c3d" pod="openshift-authentication/oauth-openshift-558db77b4-s46nf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-s46nf\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:17 crc kubenswrapper[4861]: E0129 06:39:17.426721 4861 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.426766 4861 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:17 crc kubenswrapper[4861]: I0129 06:39:17.427386 4861 status_manager.go:851] "Failed to get status for pod" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.80:6443: connect: connection refused" Jan 29 06:39:18 crc kubenswrapper[4861]: I0129 06:39:18.444153 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8fe45f90e6ef472a04c01d651584211d77e50909b25ba292b5278e79bf009589"} Jan 29 06:39:18 crc kubenswrapper[4861]: I0129 06:39:18.444697 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"faf66bcfc6d682c9d4a60ef54049725ae7d4b27213f62da145791222b2422a92"} Jan 29 06:39:18 crc kubenswrapper[4861]: I0129 06:39:18.444709 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7a20ae62c2d69a4681c85767d9cf30ff102022d889666b45a7223ebbd09c7f66"} Jan 29 06:39:18 crc kubenswrapper[4861]: I0129 06:39:18.451704 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 29 06:39:18 crc kubenswrapper[4861]: I0129 06:39:18.451771 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"29d51fc79f074b940ea28ee068c82984b43224c1716f5f182242c8edfbaff7bf"} Jan 29 06:39:19 crc kubenswrapper[4861]: I0129 06:39:19.460119 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"05c1d8b2945dae929d4d3109ad37d53ca4faec3057a44cf9af2139283fb6b92c"} Jan 29 06:39:19 crc kubenswrapper[4861]: I0129 06:39:19.460401 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:19 crc kubenswrapper[4861]: I0129 06:39:19.460412 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"11f613afd7bde0067ae6135615f0af9b85b538d7a7328599ff928642e1227eb4"} Jan 29 06:39:19 crc kubenswrapper[4861]: I0129 06:39:19.460458 4861 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="fead1b79-d9e2-4342-8ec9-039fd63d5a38" Jan 29 06:39:19 crc kubenswrapper[4861]: I0129 06:39:19.460491 4861 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="fead1b79-d9e2-4342-8ec9-039fd63d5a38" Jan 29 06:39:21 crc kubenswrapper[4861]: I0129 06:39:21.138799 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:21 crc kubenswrapper[4861]: I0129 06:39:21.138869 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:21 crc kubenswrapper[4861]: I0129 06:39:21.148589 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:22 crc kubenswrapper[4861]: I0129 06:39:22.647337 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:39:22 crc kubenswrapper[4861]: I0129 06:39:22.647550 4861 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 29 06:39:22 crc kubenswrapper[4861]: I0129 06:39:22.649573 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 29 06:39:24 crc kubenswrapper[4861]: I0129 
06:39:24.384670 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:39:24 crc kubenswrapper[4861]: I0129 06:39:24.474055 4861 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:24 crc kubenswrapper[4861]: I0129 06:39:24.507703 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="f4e9048b-ccf9-4562-880d-119e787aecc2" Jan 29 06:39:25 crc kubenswrapper[4861]: I0129 06:39:25.500736 4861 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="fead1b79-d9e2-4342-8ec9-039fd63d5a38" Jan 29 06:39:25 crc kubenswrapper[4861]: I0129 06:39:25.501226 4861 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="fead1b79-d9e2-4342-8ec9-039fd63d5a38" Jan 29 06:39:25 crc kubenswrapper[4861]: I0129 06:39:25.508376 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="f4e9048b-ccf9-4562-880d-119e787aecc2" Jan 29 06:39:32 crc kubenswrapper[4861]: I0129 06:39:32.647283 4861 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 29 06:39:32 crc kubenswrapper[4861]: I0129 06:39:32.648531 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 29 06:39:34 crc kubenswrapper[4861]: I0129 06:39:34.880376 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 29 06:39:35 crc kubenswrapper[4861]: I0129 06:39:35.499238 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 29 06:39:35 crc kubenswrapper[4861]: I0129 06:39:35.911500 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 06:39:35 crc kubenswrapper[4861]: I0129 06:39:35.921933 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 29 06:39:35 crc kubenswrapper[4861]: I0129 06:39:35.927328 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 29 06:39:35 crc kubenswrapper[4861]: I0129 06:39:35.997722 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 29 06:39:36 crc kubenswrapper[4861]: I0129 06:39:36.106285 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 29 06:39:36 crc kubenswrapper[4861]: I0129 06:39:36.160841 4861 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 29 06:39:36 crc kubenswrapper[4861]: I0129 06:39:36.210236 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 29 06:39:36 crc kubenswrapper[4861]: I0129 06:39:36.268036 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 29 06:39:36 crc kubenswrapper[4861]: I0129 06:39:36.406380 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 29 06:39:36 crc kubenswrapper[4861]: I0129 06:39:36.452099 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 29 06:39:36 crc kubenswrapper[4861]: I0129 06:39:36.487540 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 29 06:39:36 crc kubenswrapper[4861]: I0129 06:39:36.797413 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 29 06:39:36 crc kubenswrapper[4861]: I0129 06:39:36.939101 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.001326 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.017454 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.165873 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.218411 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.247251 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.383985 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.508151 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.589923 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.606206 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.661333 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.761460 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 29 06:39:37 crc kubenswrapper[4861]: I0129 06:39:37.987348 4861 reflector.go:368] Caches populated for *v1.Secret 
from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:37.999997 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.066930 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.100440 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.162413 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.231845 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.267436 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.434128 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.460686 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.669341 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.713320 4861 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.915727 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.957681 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 29 06:39:38 crc kubenswrapper[4861]: I0129 06:39:38.986012 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.042591 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.106903 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.276170 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.432028 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.496062 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.501334 4861 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.504724 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.598559 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.643912 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.659440 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.682024 4861 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.758405 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.817906 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.895931 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.921596 4861 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.968363 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 29 06:39:39 crc kubenswrapper[4861]: I0129 06:39:39.970478 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.043216 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.075226 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.094921 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.162931 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.204364 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.336916 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.416765 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.517511 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 
Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.523778 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.614597 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.710281 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.780593 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.786408 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.816585 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.949819 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Jan 29 06:39:40 crc kubenswrapper[4861]: I0129 06:39:40.966657 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.018559 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.057905 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.116225 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.153331 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.233175 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.338798 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.431431 4861 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.434880 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.438968 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-s46nf","openshift-kube-apiserver/kube-apiserver-crc"]
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.439057 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-657494565c-jc975"]
Jan 29 06:39:41 crc kubenswrapper[4861]: E0129 06:39:41.439332 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="407f7505-8386-467a-9b71-e1aea70b9c3d" containerName="oauth-openshift"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.439361 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="407f7505-8386-467a-9b71-e1aea70b9c3d" containerName="oauth-openshift"
Jan 29 06:39:41 crc kubenswrapper[4861]: E0129 06:39:41.439385 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" containerName="installer"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.439395 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" containerName="installer"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.439537 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="407f7505-8386-467a-9b71-e1aea70b9c3d" containerName="oauth-openshift"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.439572 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="3643d6fc-457f-4932-9e89-d5ed6e176e45" containerName="installer"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.439593 4861 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="fead1b79-d9e2-4342-8ec9-039fd63d5a38"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.439635 4861 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="fead1b79-d9e2-4342-8ec9-039fd63d5a38"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.440170 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-657494565c-jc975"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.442529 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.445013 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.445716 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.445774 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.446144 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.446280 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.446898 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.447549 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.448045 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.448335 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.448337 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.449044 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.449320 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.460178 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.467001 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.474157 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.475909 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=17.475877939 podStartE2EDuration="17.475877939s" podCreationTimestamp="2026-01-29 06:39:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:39:41.47071127 +0000 UTC m=+273.142205897" watchObservedRunningTime="2026-01-29 06:39:41.475877939 +0000 UTC m=+273.147372576"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.527470 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.528907 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.553413 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.556517 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-session\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.556567 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-audit-policies\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.556617 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.556657 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-audit-dir\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.556691 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.556809 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-router-certs\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.556967 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-template-error\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.557154 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-template-login\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.557194 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975"
Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.557223 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975"
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-service-ca\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.557356 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.557421 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.557475 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcjjr\" (UniqueName: \"kubernetes.io/projected/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-kube-api-access-dcjjr\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.580539 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.635482 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.659666 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.659752 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-router-certs\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.659788 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-template-error\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.659840 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-template-login\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.659889 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.659926 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.659975 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-service-ca\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.660004 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.660105 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.660144 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcjjr\" (UniqueName: \"kubernetes.io/projected/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-kube-api-access-dcjjr\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.660222 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-session\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.660259 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-audit-policies\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.660302 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.660663 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-audit-dir\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.660768 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-audit-dir\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.661717 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-service-ca\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.661884 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-audit-policies\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.661905 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.661917 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.669308 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-session\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.669416 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.669549 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-template-login\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.671322 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.671556 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.672573 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-router-certs\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.673927 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.687351 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-v4-0-config-user-template-error\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.701351 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcjjr\" (UniqueName: 
\"kubernetes.io/projected/23b3eab9-ac50-48b1-85d2-5479a1ae2ae6-kube-api-access-dcjjr\") pod \"oauth-openshift-657494565c-jc975\" (UID: \"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6\") " pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.751682 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.758802 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.770929 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.798244 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.809183 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.896377 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 29 06:39:41 crc kubenswrapper[4861]: I0129 06:39:41.928978 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.031727 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.059776 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.071051 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.133730 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.275737 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.283115 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.369369 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.378296 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.509421 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.520435 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.542476 4861 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.553458 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.593844 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.639386 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.647637 4861 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.647751 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.647843 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.649097 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"29d51fc79f074b940ea28ee068c82984b43224c1716f5f182242c8edfbaff7bf"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.649304 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://29d51fc79f074b940ea28ee068c82984b43224c1716f5f182242c8edfbaff7bf" gracePeriod=30 Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.754620 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 29 06:39:42 crc kubenswrapper[4861]: I0129 06:39:42.848884 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.034922 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.118200 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.126413 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="407f7505-8386-467a-9b71-e1aea70b9c3d" path="/var/lib/kubelet/pods/407f7505-8386-467a-9b71-e1aea70b9c3d/volumes" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.148550 4861 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.308803 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.335760 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.349465 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.454905 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.548370 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.629914 4861 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.662061 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.860639 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.918935 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.924884 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 29 06:39:43 crc kubenswrapper[4861]: I0129 06:39:43.946512 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.332862 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.349037 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.402104 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.480777 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.524407 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.525238 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.533986 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 29 06:39:44 crc 
kubenswrapper[4861]: I0129 06:39:44.538526 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.559276 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.601360 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.679001 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.725282 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 29 06:39:44 crc kubenswrapper[4861]: I0129 06:39:44.873194 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.023564 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.029019 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.037023 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.055173 4861 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.066634 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.086620 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.143480 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.217023 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.220434 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.281131 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.289535 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.340685 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.348223 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 
06:39:45.354635 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.363532 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.536111 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.546910 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-657494565c-jc975"] Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.700593 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.707864 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.731323 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.751761 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.758768 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.772787 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-657494565c-jc975"] Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.803418 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.835944 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.883446 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 29 06:39:45 crc kubenswrapper[4861]: I0129 06:39:45.915939 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.110711 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.148527 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.189141 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.202372 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.214565 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 
06:39:46.241672 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.319726 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.396558 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.482485 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.596500 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.661418 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-657494565c-jc975" event={"ID":"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6","Type":"ContainerStarted","Data":"64fadd67caa63e743ebecace9937cf5d0a32df9115c8025e652c8553dbead46b"} Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.661474 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-657494565c-jc975" event={"ID":"23b3eab9-ac50-48b1-85d2-5479a1ae2ae6","Type":"ContainerStarted","Data":"566b0bb712828d081e27422659a86b5a489095e295022ed20a8cecfa10b81c4f"} Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.662133 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.663097 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.663202 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.668879 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-657494565c-jc975" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.687860 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.694289 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-657494565c-jc975" podStartSLOduration=56.694265097 podStartE2EDuration="56.694265097s" podCreationTimestamp="2026-01-29 06:38:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:39:46.691563174 +0000 UTC m=+278.363057771" watchObservedRunningTime="2026-01-29 06:39:46.694265097 +0000 UTC m=+278.365759664" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.735318 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.739956 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.774968 4861 reflector.go:368] Caches populated 
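The pod_startup_latency_tracker entry above reports podStartE2EDuration="56.694265097s" for the replacement oauth pod; that figure is simply watchObservedRunningTime minus podCreationTimestamp, both printed in the same entry in Go's default time format. A quick self-contained check (timestamps copied from the log):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Go's default time.Time formatting, as printed in the log entry.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, err := time.Parse(layout, "2026-01-29 06:38:50 +0000 UTC")
	if err != nil {
		panic(err)
	}
	running, err := time.Parse(layout, "2026-01-29 06:39:46.694265097 +0000 UTC")
	if err != nil {
		panic(err)
	}
	fmt.Println(running.Sub(created)) // 56.694265097s, matching podStartE2EDuration
}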
for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.791871 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.828734 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.882443 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 29 06:39:46 crc kubenswrapper[4861]: I0129 06:39:46.969545 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.049686 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.104639 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.107440 4861 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.107843 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://a96fc85349ef18db88384fa595ca86e8ca949d305c36b2be39c2dd8ad3532a46" gracePeriod=5 Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.226950 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.253290 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.321890 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.421462 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.434677 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.494791 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.630285 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.648316 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.659866 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.714215 
4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.758521 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.777493 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.804538 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.848210 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.863469 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 29 06:39:47 crc kubenswrapper[4861]: I0129 06:39:47.899575 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.069191 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.109504 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.113885 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.118318 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.207990 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.239190 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.441331 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.608128 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.608202 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.629405 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.758451 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.776591 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.921644 4861 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 29 06:39:48 crc kubenswrapper[4861]: I0129 06:39:48.987157 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 29 06:39:49 crc kubenswrapper[4861]: I0129 06:39:49.122021 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 29 06:39:49 crc kubenswrapper[4861]: I0129 06:39:49.161585 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 29 06:39:49 crc kubenswrapper[4861]: I0129 06:39:49.340410 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 29 06:39:49 crc kubenswrapper[4861]: I0129 06:39:49.402460 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 29 06:39:49 crc kubenswrapper[4861]: I0129 06:39:49.417480 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 29 06:39:49 crc kubenswrapper[4861]: I0129 06:39:49.612850 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 29 06:39:49 crc kubenswrapper[4861]: I0129 06:39:49.640709 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 29 06:39:49 crc kubenswrapper[4861]: I0129 06:39:49.719481 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 29 06:39:49 crc kubenswrapper[4861]: I0129 06:39:49.753035 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 29 06:39:50 crc kubenswrapper[4861]: I0129 06:39:50.108348 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 29 06:39:50 crc kubenswrapper[4861]: I0129 06:39:50.405177 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 06:39:50 crc kubenswrapper[4861]: I0129 06:39:50.444665 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 29 06:39:50 crc kubenswrapper[4861]: I0129 06:39:50.584900 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 29 06:39:50 crc kubenswrapper[4861]: I0129 06:39:50.988214 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 29 06:39:50 crc kubenswrapper[4861]: I0129 06:39:50.989673 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 29 06:39:50 crc kubenswrapper[4861]: I0129 06:39:50.999147 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 29 06:39:51 crc kubenswrapper[4861]: I0129 06:39:51.001715 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 29 06:39:51 crc kubenswrapper[4861]: I0129 06:39:51.008189 4861 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 29 06:39:51 crc kubenswrapper[4861]: I0129 06:39:51.374728 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 06:39:51 crc kubenswrapper[4861]: I0129 06:39:51.572562 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 29 06:39:51 crc kubenswrapper[4861]: I0129 06:39:51.917299 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 29 06:39:52 crc kubenswrapper[4861]: E0129 06:39:52.245861 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-conmon-a96fc85349ef18db88384fa595ca86e8ca949d305c36b2be39c2dd8ad3532a46.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-a96fc85349ef18db88384fa595ca86e8ca949d305c36b2be39c2dd8ad3532a46.scope\": RecentStats: unable to find data in memory cache]" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.510442 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.565554 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.684442 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.699069 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.699212 4861 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="a96fc85349ef18db88384fa595ca86e8ca949d305c36b2be39c2dd8ad3532a46" exitCode=137 Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.699282 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8765e68a739876557e8f9cb6ae7354dee4f119a7fad2dfe6bdd20b2df706dd2b" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.699410 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.699512 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.841988 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842138 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842177 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842205 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842271 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842325 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842358 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842400 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842541 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842680 4861 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842704 4861 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842720 4861 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.842735 4861 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.855977 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:39:52 crc kubenswrapper[4861]: I0129 06:39:52.943850 4861 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 06:39:53 crc kubenswrapper[4861]: I0129 06:39:53.000142 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 29 06:39:53 crc kubenswrapper[4861]: I0129 06:39:53.140218 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 29 06:39:53 crc kubenswrapper[4861]: I0129 06:39:53.317443 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 29 06:39:53 crc kubenswrapper[4861]: I0129 06:39:53.706503 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 06:40:08 crc kubenswrapper[4861]: I0129 06:40:08.785997 4861 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 29 06:40:08 crc kubenswrapper[4861]: I0129 06:40:08.834692 4861 generic.go:334] "Generic (PLEG): container finished" podID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" containerID="a2c61a1de90ac25dc67452730f0600283a9513bff2fe9648523a12ad0a19ec8d" exitCode=0 Jan 29 06:40:08 crc kubenswrapper[4861]: I0129 06:40:08.834816 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" event={"ID":"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3","Type":"ContainerDied","Data":"a2c61a1de90ac25dc67452730f0600283a9513bff2fe9648523a12ad0a19ec8d"} Jan 29 06:40:08 crc kubenswrapper[4861]: I0129 06:40:08.835638 4861 scope.go:117] "RemoveContainer" containerID="a2c61a1de90ac25dc67452730f0600283a9513bff2fe9648523a12ad0a19ec8d" Jan 29 06:40:09 crc kubenswrapper[4861]: I0129 06:40:09.845154 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" event={"ID":"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3","Type":"ContainerStarted","Data":"19f7178c968ec7212efe71f4d35a95ce9a3834a3bef62d87f6caa36247bf0060"} Jan 29 06:40:09 crc kubenswrapper[4861]: I0129 06:40:09.845953 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:40:09 crc kubenswrapper[4861]: I0129 06:40:09.848116 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:40:12 crc kubenswrapper[4861]: I0129 06:40:12.870190 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 29 06:40:12 crc kubenswrapper[4861]: I0129 06:40:12.873014 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 29 06:40:12 crc kubenswrapper[4861]: I0129 06:40:12.873061 4861 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="29d51fc79f074b940ea28ee068c82984b43224c1716f5f182242c8edfbaff7bf" exitCode=137 Jan 29 06:40:12 crc kubenswrapper[4861]: I0129 06:40:12.873125 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"29d51fc79f074b940ea28ee068c82984b43224c1716f5f182242c8edfbaff7bf"} Jan 29 06:40:12 crc kubenswrapper[4861]: I0129 06:40:12.873171 4861 scope.go:117] "RemoveContainer" containerID="7cbb26834fb28f4ad30b78f44a5fa7e6cb6f2a99f6025b46d99a1e0cda3328b0" Jan 29 06:40:13 crc kubenswrapper[4861]: I0129 06:40:13.882534 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 29 06:40:13 crc kubenswrapper[4861]: I0129 06:40:13.883455 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b5f5a138b47309222cfbcba04cdb0c08ca76180bc32bd49f57f8f2d0aad8222f"} Jan 29 06:40:14 crc kubenswrapper[4861]: I0129 06:40:14.383804 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:40:22 crc kubenswrapper[4861]: I0129 06:40:22.646817 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:40:22 crc kubenswrapper[4861]: I0129 06:40:22.652388 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:40:22 crc kubenswrapper[4861]: I0129 06:40:22.951767 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 06:40:33 crc kubenswrapper[4861]: I0129 06:40:33.374493 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"] Jan 29 06:40:33 crc kubenswrapper[4861]: I0129 06:40:33.375403 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" podUID="ffe2c087-e478-43ea-89b2-af4c64778c35" containerName="route-controller-manager" containerID="cri-o://72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262" gracePeriod=30 Jan 29 06:40:33 crc kubenswrapper[4861]: I0129 06:40:33.383245 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pzvfs"] Jan 29 06:40:33 crc kubenswrapper[4861]: I0129 06:40:33.383523 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" podUID="cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" containerName="controller-manager" containerID="cri-o://1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626" gracePeriod=30 Jan 29 06:40:33 crc kubenswrapper[4861]: I0129 06:40:33.850811 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:40:33 crc kubenswrapper[4861]: I0129 06:40:33.907719 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:33.998720 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kp9fw\" (UniqueName: \"kubernetes.io/projected/ffe2c087-e478-43ea-89b2-af4c64778c35-kube-api-access-kp9fw\") pod \"ffe2c087-e478-43ea-89b2-af4c64778c35\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:33.998778 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-config\") pod \"ffe2c087-e478-43ea-89b2-af4c64778c35\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:33.998804 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sthrp\" (UniqueName: \"kubernetes.io/projected/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-kube-api-access-sthrp\") pod \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:33.998841 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-proxy-ca-bundles\") pod \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:33.998865 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-serving-cert\") pod \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:33.998906 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-client-ca\") pod \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:33.998935 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-client-ca\") pod \"ffe2c087-e478-43ea-89b2-af4c64778c35\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:33.998973 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-config\") pod \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\" (UID: \"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36\") " Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:33.999019 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffe2c087-e478-43ea-89b2-af4c64778c35-serving-cert\") pod \"ffe2c087-e478-43ea-89b2-af4c64778c35\" (UID: \"ffe2c087-e478-43ea-89b2-af4c64778c35\") " Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:33.999797 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-config" (OuterVolumeSpecName: "config") pod "ffe2c087-e478-43ea-89b2-af4c64778c35" (UID: 
"ffe2c087-e478-43ea-89b2-af4c64778c35"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.000544 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" (UID: "cdeb1c43-11fd-4a37-b31d-f4c1e7600e36"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.000560 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-client-ca" (OuterVolumeSpecName: "client-ca") pod "cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" (UID: "cdeb1c43-11fd-4a37-b31d-f4c1e7600e36"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.000561 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-client-ca" (OuterVolumeSpecName: "client-ca") pod "ffe2c087-e478-43ea-89b2-af4c64778c35" (UID: "ffe2c087-e478-43ea-89b2-af4c64778c35"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.000930 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-config" (OuterVolumeSpecName: "config") pod "cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" (UID: "cdeb1c43-11fd-4a37-b31d-f4c1e7600e36"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.006046 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffe2c087-e478-43ea-89b2-af4c64778c35-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ffe2c087-e478-43ea-89b2-af4c64778c35" (UID: "ffe2c087-e478-43ea-89b2-af4c64778c35"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.006058 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffe2c087-e478-43ea-89b2-af4c64778c35-kube-api-access-kp9fw" (OuterVolumeSpecName: "kube-api-access-kp9fw") pod "ffe2c087-e478-43ea-89b2-af4c64778c35" (UID: "ffe2c087-e478-43ea-89b2-af4c64778c35"). InnerVolumeSpecName "kube-api-access-kp9fw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.006180 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-kube-api-access-sthrp" (OuterVolumeSpecName: "kube-api-access-sthrp") pod "cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" (UID: "cdeb1c43-11fd-4a37-b31d-f4c1e7600e36"). InnerVolumeSpecName "kube-api-access-sthrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.008513 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" (UID: "cdeb1c43-11fd-4a37-b31d-f4c1e7600e36"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.022591 4861 generic.go:334] "Generic (PLEG): container finished" podID="ffe2c087-e478-43ea-89b2-af4c64778c35" containerID="72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262" exitCode=0 Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.022718 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" event={"ID":"ffe2c087-e478-43ea-89b2-af4c64778c35","Type":"ContainerDied","Data":"72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262"} Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.022754 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" event={"ID":"ffe2c087-e478-43ea-89b2-af4c64778c35","Type":"ContainerDied","Data":"4d2aeb1dd5a80db18e94a150d624f95cf6a7abd39f920674c6cdd69c6b860e32"} Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.022774 4861 scope.go:117] "RemoveContainer" containerID="72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.022969 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.025985 4861 generic.go:334] "Generic (PLEG): container finished" podID="cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" containerID="1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626" exitCode=0 Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.026056 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" event={"ID":"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36","Type":"ContainerDied","Data":"1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626"} Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.026119 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" event={"ID":"cdeb1c43-11fd-4a37-b31d-f4c1e7600e36","Type":"ContainerDied","Data":"f6b4456cdf8672d96ac8eb836920b2eb1b8d5af0c356e3cd7d7aacddb2529336"} Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.026199 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pzvfs" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.043958 4861 scope.go:117] "RemoveContainer" containerID="72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262" Jan 29 06:40:34 crc kubenswrapper[4861]: E0129 06:40:34.044464 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262\": container with ID starting with 72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262 not found: ID does not exist" containerID="72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.044510 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262"} err="failed to get container status \"72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262\": rpc error: code = NotFound desc = could not find container \"72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262\": container with ID starting with 72539d1aaa699ff746086f836dbb99a48b55832b7a990c6f8e7ddb811a67e262 not found: ID does not exist" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.044540 4861 scope.go:117] "RemoveContainer" containerID="1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.053299 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"] Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.055672 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-48cc8"] Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.066182 4861 scope.go:117] "RemoveContainer" containerID="1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626" Jan 29 06:40:34 crc kubenswrapper[4861]: E0129 06:40:34.069593 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626\": container with ID starting with 1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626 not found: ID does not exist" containerID="1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.069758 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626"} err="failed to get container status \"1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626\": rpc error: code = NotFound desc = could not find container \"1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626\": container with ID starting with 1b226655fc39311968b2c30a72f1decf126d235170ee5b4d1f43765afb8ec626 not found: ID does not exist" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.090149 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pzvfs"] Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.093704 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pzvfs"] Jan 29 06:40:34 crc 
kubenswrapper[4861]: I0129 06:40:34.100586 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffe2c087-e478-43ea-89b2-af4c64778c35-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.100621 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kp9fw\" (UniqueName: \"kubernetes.io/projected/ffe2c087-e478-43ea-89b2-af4c64778c35-kube-api-access-kp9fw\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.100662 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.100675 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sthrp\" (UniqueName: \"kubernetes.io/projected/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-kube-api-access-sthrp\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.100686 4861 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.100696 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.100705 4861 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.100715 4861 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ffe2c087-e478-43ea-89b2-af4c64778c35-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.100724 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.779595 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj"] Jan 29 06:40:34 crc kubenswrapper[4861]: E0129 06:40:34.780051 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.780109 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 06:40:34 crc kubenswrapper[4861]: E0129 06:40:34.780131 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffe2c087-e478-43ea-89b2-af4c64778c35" containerName="route-controller-manager" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.780146 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffe2c087-e478-43ea-89b2-af4c64778c35" containerName="route-controller-manager" Jan 29 06:40:34 crc kubenswrapper[4861]: E0129 06:40:34.780184 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" 
containerName="controller-manager" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.780199 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" containerName="controller-manager" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.782045 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" containerName="controller-manager" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.782114 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.782147 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffe2c087-e478-43ea-89b2-af4c64778c35" containerName="route-controller-manager" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.782842 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc"] Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.783109 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.783843 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.785669 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.789268 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.789615 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.789730 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.789849 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.789904 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.790527 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.789900 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.791259 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.791695 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.792154 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 
06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.792350 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.802963 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj"] Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.806498 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.808097 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc"] Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.911496 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23da7d08-55d6-47ef-aaed-bc3056cd84b2-serving-cert\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.911583 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-config\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.911620 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-client-ca\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.911663 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-client-ca\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.911698 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-proxy-ca-bundles\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.911735 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-config\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.911768 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-8vnvx\" (UniqueName: \"kubernetes.io/projected/7ddd394d-c898-4a53-89cc-aaecf936e479-kube-api-access-8vnvx\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.911814 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7mrx\" (UniqueName: \"kubernetes.io/projected/23da7d08-55d6-47ef-aaed-bc3056cd84b2-kube-api-access-g7mrx\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:34 crc kubenswrapper[4861]: I0129 06:40:34.911842 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ddd394d-c898-4a53-89cc-aaecf936e479-serving-cert\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.013905 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23da7d08-55d6-47ef-aaed-bc3056cd84b2-serving-cert\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.014016 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-config\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.014063 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-client-ca\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.014144 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-client-ca\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.014190 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-proxy-ca-bundles\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.014242 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-config\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.014287 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vnvx\" (UniqueName: \"kubernetes.io/projected/7ddd394d-c898-4a53-89cc-aaecf936e479-kube-api-access-8vnvx\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.014345 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7mrx\" (UniqueName: \"kubernetes.io/projected/23da7d08-55d6-47ef-aaed-bc3056cd84b2-kube-api-access-g7mrx\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.014387 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ddd394d-c898-4a53-89cc-aaecf936e479-serving-cert\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.016054 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-client-ca\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.016332 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-config\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.016706 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-client-ca\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.016993 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-config\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.017650 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-proxy-ca-bundles\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " 
pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.021050 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ddd394d-c898-4a53-89cc-aaecf936e479-serving-cert\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.022772 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23da7d08-55d6-47ef-aaed-bc3056cd84b2-serving-cert\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.035233 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vnvx\" (UniqueName: \"kubernetes.io/projected/7ddd394d-c898-4a53-89cc-aaecf936e479-kube-api-access-8vnvx\") pod \"route-controller-manager-5756f44d6-sj4xc\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.050003 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7mrx\" (UniqueName: \"kubernetes.io/projected/23da7d08-55d6-47ef-aaed-bc3056cd84b2-kube-api-access-g7mrx\") pod \"controller-manager-7ccb65cc84-dc9tj\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.126433 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdeb1c43-11fd-4a37-b31d-f4c1e7600e36" path="/var/lib/kubelet/pods/cdeb1c43-11fd-4a37-b31d-f4c1e7600e36/volumes" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.127682 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffe2c087-e478-43ea-89b2-af4c64778c35" path="/var/lib/kubelet/pods/ffe2c087-e478-43ea-89b2-af4c64778c35/volumes" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.143852 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.155660 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.488782 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj"] Jan 29 06:40:35 crc kubenswrapper[4861]: I0129 06:40:35.511573 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc"] Jan 29 06:40:35 crc kubenswrapper[4861]: W0129 06:40:35.518898 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ddd394d_c898_4a53_89cc_aaecf936e479.slice/crio-4356c0c11a72c55f1a0469302026d6767531468f2eada58359d9837c66bdcb6e WatchSource:0}: Error finding container 4356c0c11a72c55f1a0469302026d6767531468f2eada58359d9837c66bdcb6e: Status 404 returned error can't find the container with id 4356c0c11a72c55f1a0469302026d6767531468f2eada58359d9837c66bdcb6e Jan 29 06:40:36 crc kubenswrapper[4861]: I0129 06:40:36.042572 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" event={"ID":"23da7d08-55d6-47ef-aaed-bc3056cd84b2","Type":"ContainerStarted","Data":"e78227581c009bdde316b685b4cdbb8e2e7f517eddadccbe16acf3472d68c97c"} Jan 29 06:40:36 crc kubenswrapper[4861]: I0129 06:40:36.042623 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" event={"ID":"23da7d08-55d6-47ef-aaed-bc3056cd84b2","Type":"ContainerStarted","Data":"423d45ef0bec464642a30725ab9826434e2138eebb199a1cf518b84f736cb32e"} Jan 29 06:40:36 crc kubenswrapper[4861]: I0129 06:40:36.042960 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:36 crc kubenswrapper[4861]: I0129 06:40:36.045706 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" event={"ID":"7ddd394d-c898-4a53-89cc-aaecf936e479","Type":"ContainerStarted","Data":"c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b"} Jan 29 06:40:36 crc kubenswrapper[4861]: I0129 06:40:36.045765 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" event={"ID":"7ddd394d-c898-4a53-89cc-aaecf936e479","Type":"ContainerStarted","Data":"4356c0c11a72c55f1a0469302026d6767531468f2eada58359d9837c66bdcb6e"} Jan 29 06:40:36 crc kubenswrapper[4861]: I0129 06:40:36.046113 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:36 crc kubenswrapper[4861]: I0129 06:40:36.051312 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:40:36 crc kubenswrapper[4861]: I0129 06:40:36.075843 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" podStartSLOduration=3.075819108 podStartE2EDuration="3.075819108s" podCreationTimestamp="2026-01-29 06:40:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:40:36.067194765 +0000 UTC 
m=+327.738689322" watchObservedRunningTime="2026-01-29 06:40:36.075819108 +0000 UTC m=+327.747313655" Jan 29 06:40:36 crc kubenswrapper[4861]: I0129 06:40:36.100980 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" podStartSLOduration=3.100960947 podStartE2EDuration="3.100960947s" podCreationTimestamp="2026-01-29 06:40:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:40:36.098816379 +0000 UTC m=+327.770310936" watchObservedRunningTime="2026-01-29 06:40:36.100960947 +0000 UTC m=+327.772455494" Jan 29 06:40:36 crc kubenswrapper[4861]: I0129 06:40:36.383228 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.160831 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc"] Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.161627 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" podUID="7ddd394d-c898-4a53-89cc-aaecf936e479" containerName="route-controller-manager" containerID="cri-o://c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b" gracePeriod=30 Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.683209 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.825007 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-client-ca\") pod \"7ddd394d-c898-4a53-89cc-aaecf936e479\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.825096 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-config\") pod \"7ddd394d-c898-4a53-89cc-aaecf936e479\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.825130 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vnvx\" (UniqueName: \"kubernetes.io/projected/7ddd394d-c898-4a53-89cc-aaecf936e479-kube-api-access-8vnvx\") pod \"7ddd394d-c898-4a53-89cc-aaecf936e479\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.825960 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ddd394d-c898-4a53-89cc-aaecf936e479-serving-cert\") pod \"7ddd394d-c898-4a53-89cc-aaecf936e479\" (UID: \"7ddd394d-c898-4a53-89cc-aaecf936e479\") " Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.826672 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-client-ca" (OuterVolumeSpecName: "client-ca") pod "7ddd394d-c898-4a53-89cc-aaecf936e479" (UID: "7ddd394d-c898-4a53-89cc-aaecf936e479"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.827712 4861 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.828444 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-config" (OuterVolumeSpecName: "config") pod "7ddd394d-c898-4a53-89cc-aaecf936e479" (UID: "7ddd394d-c898-4a53-89cc-aaecf936e479"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.839595 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ddd394d-c898-4a53-89cc-aaecf936e479-kube-api-access-8vnvx" (OuterVolumeSpecName: "kube-api-access-8vnvx") pod "7ddd394d-c898-4a53-89cc-aaecf936e479" (UID: "7ddd394d-c898-4a53-89cc-aaecf936e479"). InnerVolumeSpecName "kube-api-access-8vnvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.855526 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ddd394d-c898-4a53-89cc-aaecf936e479-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7ddd394d-c898-4a53-89cc-aaecf936e479" (UID: "7ddd394d-c898-4a53-89cc-aaecf936e479"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.929318 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddd394d-c898-4a53-89cc-aaecf936e479-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.929361 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vnvx\" (UniqueName: \"kubernetes.io/projected/7ddd394d-c898-4a53-89cc-aaecf936e479-kube-api-access-8vnvx\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:41 crc kubenswrapper[4861]: I0129 06:40:41.929377 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ddd394d-c898-4a53-89cc-aaecf936e479-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.127212 4861 generic.go:334] "Generic (PLEG): container finished" podID="7ddd394d-c898-4a53-89cc-aaecf936e479" containerID="c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b" exitCode=0 Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.127278 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" event={"ID":"7ddd394d-c898-4a53-89cc-aaecf936e479","Type":"ContainerDied","Data":"c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b"} Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.127319 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" event={"ID":"7ddd394d-c898-4a53-89cc-aaecf936e479","Type":"ContainerDied","Data":"4356c0c11a72c55f1a0469302026d6767531468f2eada58359d9837c66bdcb6e"} Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.127344 4861 scope.go:117] "RemoveContainer" containerID="c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b" 
Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.127579 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.157190 4861 scope.go:117] "RemoveContainer" containerID="c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b" Jan 29 06:40:42 crc kubenswrapper[4861]: E0129 06:40:42.157898 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b\": container with ID starting with c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b not found: ID does not exist" containerID="c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.157942 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b"} err="failed to get container status \"c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b\": rpc error: code = NotFound desc = could not find container \"c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b\": container with ID starting with c6a056b1d027fdbb502a9e45ab9ee13e1a161ca3e25405039adf52f2632c331b not found: ID does not exist" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.168824 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc"] Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.181688 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5756f44d6-sj4xc"] Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.784529 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2"] Jan 29 06:40:42 crc kubenswrapper[4861]: E0129 06:40:42.785158 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ddd394d-c898-4a53-89cc-aaecf936e479" containerName="route-controller-manager" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.785174 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ddd394d-c898-4a53-89cc-aaecf936e479" containerName="route-controller-manager" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.785273 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ddd394d-c898-4a53-89cc-aaecf936e479" containerName="route-controller-manager" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.785731 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.788627 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.788985 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.789217 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.789328 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.789416 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.789900 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.799490 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2"] Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.949770 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f01e8d6-f035-4019-9222-705db0180162-client-ca\") pod \"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.949841 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f01e8d6-f035-4019-9222-705db0180162-serving-cert\") pod \"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.950194 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmbxp\" (UniqueName: \"kubernetes.io/projected/8f01e8d6-f035-4019-9222-705db0180162-kube-api-access-hmbxp\") pod \"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:42 crc kubenswrapper[4861]: I0129 06:40:42.950472 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f01e8d6-f035-4019-9222-705db0180162-config\") pod \"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:43 crc kubenswrapper[4861]: I0129 06:40:43.052655 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmbxp\" (UniqueName: \"kubernetes.io/projected/8f01e8d6-f035-4019-9222-705db0180162-kube-api-access-hmbxp\") pod 
\"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:43 crc kubenswrapper[4861]: I0129 06:40:43.052798 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f01e8d6-f035-4019-9222-705db0180162-config\") pod \"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:43 crc kubenswrapper[4861]: I0129 06:40:43.052880 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f01e8d6-f035-4019-9222-705db0180162-client-ca\") pod \"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:43 crc kubenswrapper[4861]: I0129 06:40:43.052935 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f01e8d6-f035-4019-9222-705db0180162-serving-cert\") pod \"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:43 crc kubenswrapper[4861]: I0129 06:40:43.055061 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f01e8d6-f035-4019-9222-705db0180162-config\") pod \"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:43 crc kubenswrapper[4861]: I0129 06:40:43.055392 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f01e8d6-f035-4019-9222-705db0180162-client-ca\") pod \"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:43 crc kubenswrapper[4861]: I0129 06:40:43.061845 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f01e8d6-f035-4019-9222-705db0180162-serving-cert\") pod \"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:43 crc kubenswrapper[4861]: I0129 06:40:43.075706 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmbxp\" (UniqueName: \"kubernetes.io/projected/8f01e8d6-f035-4019-9222-705db0180162-kube-api-access-hmbxp\") pod \"route-controller-manager-d4c89595b-f52x2\" (UID: \"8f01e8d6-f035-4019-9222-705db0180162\") " pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:43 crc kubenswrapper[4861]: I0129 06:40:43.108944 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:43 crc kubenswrapper[4861]: I0129 06:40:43.128596 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ddd394d-c898-4a53-89cc-aaecf936e479" path="/var/lib/kubelet/pods/7ddd394d-c898-4a53-89cc-aaecf936e479/volumes" Jan 29 06:40:43 crc kubenswrapper[4861]: I0129 06:40:43.401510 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2"] Jan 29 06:40:44 crc kubenswrapper[4861]: I0129 06:40:44.148704 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" event={"ID":"8f01e8d6-f035-4019-9222-705db0180162","Type":"ContainerStarted","Data":"9dbab827006c5b1fd03de495ee76bb25ce36cdfeccaf5cd6f09159b54d329361"} Jan 29 06:40:44 crc kubenswrapper[4861]: I0129 06:40:44.149649 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" event={"ID":"8f01e8d6-f035-4019-9222-705db0180162","Type":"ContainerStarted","Data":"3efc4230be8c3193a7d7951e8f2c6c9baed810a7c1a4456beb3324508b3ee507"} Jan 29 06:40:44 crc kubenswrapper[4861]: I0129 06:40:44.150138 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:44 crc kubenswrapper[4861]: I0129 06:40:44.156759 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" Jan 29 06:40:44 crc kubenswrapper[4861]: I0129 06:40:44.181661 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-d4c89595b-f52x2" podStartSLOduration=3.181611819 podStartE2EDuration="3.181611819s" podCreationTimestamp="2026-01-29 06:40:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:40:44.18016497 +0000 UTC m=+335.851659537" watchObservedRunningTime="2026-01-29 06:40:44.181611819 +0000 UTC m=+335.853106416" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.236991 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tcsrb"] Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.238458 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.262575 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tcsrb"] Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.442686 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/23b86675-f2d5-4eab-bf22-e80150cda940-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.442781 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/23b86675-f2d5-4eab-bf22-e80150cda940-registry-tls\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.442809 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpmzw\" (UniqueName: \"kubernetes.io/projected/23b86675-f2d5-4eab-bf22-e80150cda940-kube-api-access-xpmzw\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.442841 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/23b86675-f2d5-4eab-bf22-e80150cda940-registry-certificates\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.442929 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/23b86675-f2d5-4eab-bf22-e80150cda940-bound-sa-token\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.442971 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/23b86675-f2d5-4eab-bf22-e80150cda940-trusted-ca\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.443020 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.443061 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/23b86675-f2d5-4eab-bf22-e80150cda940-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.470965 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.545105 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/23b86675-f2d5-4eab-bf22-e80150cda940-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.545204 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/23b86675-f2d5-4eab-bf22-e80150cda940-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.545261 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/23b86675-f2d5-4eab-bf22-e80150cda940-registry-tls\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.545291 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpmzw\" (UniqueName: \"kubernetes.io/projected/23b86675-f2d5-4eab-bf22-e80150cda940-kube-api-access-xpmzw\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.545322 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/23b86675-f2d5-4eab-bf22-e80150cda940-registry-certificates\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.545355 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/23b86675-f2d5-4eab-bf22-e80150cda940-bound-sa-token\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.545394 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/23b86675-f2d5-4eab-bf22-e80150cda940-trusted-ca\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.546157 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/23b86675-f2d5-4eab-bf22-e80150cda940-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.547302 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/23b86675-f2d5-4eab-bf22-e80150cda940-registry-certificates\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.552828 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/23b86675-f2d5-4eab-bf22-e80150cda940-trusted-ca\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.553479 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/23b86675-f2d5-4eab-bf22-e80150cda940-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.553679 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/23b86675-f2d5-4eab-bf22-e80150cda940-registry-tls\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.568903 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpmzw\" (UniqueName: \"kubernetes.io/projected/23b86675-f2d5-4eab-bf22-e80150cda940-kube-api-access-xpmzw\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.584465 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/23b86675-f2d5-4eab-bf22-e80150cda940-bound-sa-token\") pod \"image-registry-66df7c8f76-tcsrb\" (UID: \"23b86675-f2d5-4eab-bf22-e80150cda940\") " pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:47 crc kubenswrapper[4861]: I0129 06:40:47.858490 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:48 crc kubenswrapper[4861]: I0129 06:40:48.349797 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tcsrb"] Jan 29 06:40:48 crc kubenswrapper[4861]: W0129 06:40:48.354224 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod23b86675_f2d5_4eab_bf22_e80150cda940.slice/crio-294b34587f2e7b9fee48ad54504cc42e0ae1c2a03bf51882f659c911ac4c39ea WatchSource:0}: Error finding container 294b34587f2e7b9fee48ad54504cc42e0ae1c2a03bf51882f659c911ac4c39ea: Status 404 returned error can't find the container with id 294b34587f2e7b9fee48ad54504cc42e0ae1c2a03bf51882f659c911ac4c39ea Jan 29 06:40:49 crc kubenswrapper[4861]: I0129 06:40:49.196588 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" event={"ID":"23b86675-f2d5-4eab-bf22-e80150cda940","Type":"ContainerStarted","Data":"22de03a22c2116887d5637bd187c032c91dbe4293d29d944683c45c08508b84a"} Jan 29 06:40:49 crc kubenswrapper[4861]: I0129 06:40:49.196662 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" event={"ID":"23b86675-f2d5-4eab-bf22-e80150cda940","Type":"ContainerStarted","Data":"294b34587f2e7b9fee48ad54504cc42e0ae1c2a03bf51882f659c911ac4c39ea"} Jan 29 06:40:49 crc kubenswrapper[4861]: I0129 06:40:49.197504 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:40:49 crc kubenswrapper[4861]: I0129 06:40:49.218853 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" podStartSLOduration=2.218832268 podStartE2EDuration="2.218832268s" podCreationTimestamp="2026-01-29 06:40:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:40:49.217714048 +0000 UTC m=+340.889208615" watchObservedRunningTime="2026-01-29 06:40:49.218832268 +0000 UTC m=+340.890326825" Jan 29 06:41:00 crc kubenswrapper[4861]: I0129 06:41:00.630829 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:41:00 crc kubenswrapper[4861]: I0129 06:41:00.631958 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:41:07 crc kubenswrapper[4861]: I0129 06:41:07.871304 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-tcsrb" Jan 29 06:41:07 crc kubenswrapper[4861]: I0129 06:41:07.950137 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v4kqn"] Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.177668 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj"] Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.178760 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" podUID="23da7d08-55d6-47ef-aaed-bc3056cd84b2" containerName="controller-manager" containerID="cri-o://e78227581c009bdde316b685b4cdbb8e2e7f517eddadccbe16acf3472d68c97c" gracePeriod=30 Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.450618 4861 generic.go:334] "Generic (PLEG): container finished" podID="23da7d08-55d6-47ef-aaed-bc3056cd84b2" containerID="e78227581c009bdde316b685b4cdbb8e2e7f517eddadccbe16acf3472d68c97c" exitCode=0 Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.450664 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" event={"ID":"23da7d08-55d6-47ef-aaed-bc3056cd84b2","Type":"ContainerDied","Data":"e78227581c009bdde316b685b4cdbb8e2e7f517eddadccbe16acf3472d68c97c"} Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.619529 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.768321 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23da7d08-55d6-47ef-aaed-bc3056cd84b2-serving-cert\") pod \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.768463 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-config\") pod \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.768496 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-proxy-ca-bundles\") pod \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.768555 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7mrx\" (UniqueName: \"kubernetes.io/projected/23da7d08-55d6-47ef-aaed-bc3056cd84b2-kube-api-access-g7mrx\") pod \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.768609 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-client-ca\") pod \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\" (UID: \"23da7d08-55d6-47ef-aaed-bc3056cd84b2\") " Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.770861 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "23da7d08-55d6-47ef-aaed-bc3056cd84b2" (UID: "23da7d08-55d6-47ef-aaed-bc3056cd84b2"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.770925 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-client-ca" (OuterVolumeSpecName: "client-ca") pod "23da7d08-55d6-47ef-aaed-bc3056cd84b2" (UID: "23da7d08-55d6-47ef-aaed-bc3056cd84b2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.771191 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-config" (OuterVolumeSpecName: "config") pod "23da7d08-55d6-47ef-aaed-bc3056cd84b2" (UID: "23da7d08-55d6-47ef-aaed-bc3056cd84b2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.777985 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23da7d08-55d6-47ef-aaed-bc3056cd84b2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "23da7d08-55d6-47ef-aaed-bc3056cd84b2" (UID: "23da7d08-55d6-47ef-aaed-bc3056cd84b2"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.780118 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23da7d08-55d6-47ef-aaed-bc3056cd84b2-kube-api-access-g7mrx" (OuterVolumeSpecName: "kube-api-access-g7mrx") pod "23da7d08-55d6-47ef-aaed-bc3056cd84b2" (UID: "23da7d08-55d6-47ef-aaed-bc3056cd84b2"). InnerVolumeSpecName "kube-api-access-g7mrx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.870231 4861 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23da7d08-55d6-47ef-aaed-bc3056cd84b2-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.870278 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.870288 4861 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.870302 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7mrx\" (UniqueName: \"kubernetes.io/projected/23da7d08-55d6-47ef-aaed-bc3056cd84b2-kube-api-access-g7mrx\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:21 crc kubenswrapper[4861]: I0129 06:41:21.870312 4861 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/23da7d08-55d6-47ef-aaed-bc3056cd84b2-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.460948 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" event={"ID":"23da7d08-55d6-47ef-aaed-bc3056cd84b2","Type":"ContainerDied","Data":"423d45ef0bec464642a30725ab9826434e2138eebb199a1cf518b84f736cb32e"} Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.463163 4861 scope.go:117] 
"RemoveContainer" containerID="e78227581c009bdde316b685b4cdbb8e2e7f517eddadccbe16acf3472d68c97c" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.461061 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.538998 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj"] Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.544979 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7ccb65cc84-dc9tj"] Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.815519 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7659fc97dd-m2dck"] Jan 29 06:41:22 crc kubenswrapper[4861]: E0129 06:41:22.815979 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23da7d08-55d6-47ef-aaed-bc3056cd84b2" containerName="controller-manager" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.816000 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="23da7d08-55d6-47ef-aaed-bc3056cd84b2" containerName="controller-manager" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.816142 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="23da7d08-55d6-47ef-aaed-bc3056cd84b2" containerName="controller-manager" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.816693 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.820915 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.821765 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.821955 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.822016 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.828312 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.832172 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.833751 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.852864 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7659fc97dd-m2dck"] Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.895524 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3ac2b589-04fb-40d6-8760-f0586e69c452-client-ca\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " 
pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.895600 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbsw7\" (UniqueName: \"kubernetes.io/projected/3ac2b589-04fb-40d6-8760-f0586e69c452-kube-api-access-wbsw7\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.895637 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ac2b589-04fb-40d6-8760-f0586e69c452-config\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.895662 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3ac2b589-04fb-40d6-8760-f0586e69c452-proxy-ca-bundles\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.895733 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ac2b589-04fb-40d6-8760-f0586e69c452-serving-cert\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.997596 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbsw7\" (UniqueName: \"kubernetes.io/projected/3ac2b589-04fb-40d6-8760-f0586e69c452-kube-api-access-wbsw7\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.997667 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ac2b589-04fb-40d6-8760-f0586e69c452-config\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.997707 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3ac2b589-04fb-40d6-8760-f0586e69c452-proxy-ca-bundles\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.997765 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ac2b589-04fb-40d6-8760-f0586e69c452-serving-cert\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:22 crc kubenswrapper[4861]: 
I0129 06:41:22.999211 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3ac2b589-04fb-40d6-8760-f0586e69c452-client-ca\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:22 crc kubenswrapper[4861]: I0129 06:41:22.999645 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3ac2b589-04fb-40d6-8760-f0586e69c452-client-ca\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:23 crc kubenswrapper[4861]: I0129 06:41:23.000402 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3ac2b589-04fb-40d6-8760-f0586e69c452-proxy-ca-bundles\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:23 crc kubenswrapper[4861]: I0129 06:41:23.001011 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ac2b589-04fb-40d6-8760-f0586e69c452-config\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:23 crc kubenswrapper[4861]: I0129 06:41:23.006142 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ac2b589-04fb-40d6-8760-f0586e69c452-serving-cert\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:23 crc kubenswrapper[4861]: I0129 06:41:23.031381 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbsw7\" (UniqueName: \"kubernetes.io/projected/3ac2b589-04fb-40d6-8760-f0586e69c452-kube-api-access-wbsw7\") pod \"controller-manager-7659fc97dd-m2dck\" (UID: \"3ac2b589-04fb-40d6-8760-f0586e69c452\") " pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:23 crc kubenswrapper[4861]: I0129 06:41:23.124435 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23da7d08-55d6-47ef-aaed-bc3056cd84b2" path="/var/lib/kubelet/pods/23da7d08-55d6-47ef-aaed-bc3056cd84b2/volumes" Jan 29 06:41:23 crc kubenswrapper[4861]: I0129 06:41:23.148594 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:23 crc kubenswrapper[4861]: I0129 06:41:23.396941 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7659fc97dd-m2dck"] Jan 29 06:41:23 crc kubenswrapper[4861]: I0129 06:41:23.469519 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" event={"ID":"3ac2b589-04fb-40d6-8760-f0586e69c452","Type":"ContainerStarted","Data":"8087d3bca9e177243730d0f697a57451251e4f9473f245d956abd8ab81320531"} Jan 29 06:41:24 crc kubenswrapper[4861]: I0129 06:41:24.478794 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" event={"ID":"3ac2b589-04fb-40d6-8760-f0586e69c452","Type":"ContainerStarted","Data":"577b9d91ad01f046c6dfe8bbe87c8e67fd6181cf4037f3694603d028b6c959ab"} Jan 29 06:41:24 crc kubenswrapper[4861]: I0129 06:41:24.482525 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:24 crc kubenswrapper[4861]: I0129 06:41:24.488701 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" Jan 29 06:41:24 crc kubenswrapper[4861]: I0129 06:41:24.504192 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7659fc97dd-m2dck" podStartSLOduration=3.504162623 podStartE2EDuration="3.504162623s" podCreationTimestamp="2026-01-29 06:41:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:41:24.49922216 +0000 UTC m=+376.170716727" watchObservedRunningTime="2026-01-29 06:41:24.504162623 +0000 UTC m=+376.175657180" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.293832 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-b5hfw"] Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.294197 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-b5hfw" podUID="840bff63-3c46-4e8d-baa2-32315812df45" containerName="registry-server" containerID="cri-o://053e49e4cdf2556eeb1448e3d207d941d25190edad7baf91d26d6cf6bf5884c3" gracePeriod=30 Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.299298 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-79pqx"] Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.300195 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-79pqx" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerName="registry-server" containerID="cri-o://90c8c54be52c4184fdb1c277adff8d4d4a01a80f606aacb12d029f2c69754db9" gracePeriod=30 Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.312778 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7tpcj"] Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.313098 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" podUID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" containerName="marketplace-operator" 
containerID="cri-o://19f7178c968ec7212efe71f4d35a95ce9a3834a3bef62d87f6caa36247bf0060" gracePeriod=30 Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.331693 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-whxwx"] Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.332128 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-whxwx" podUID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" containerName="registry-server" containerID="cri-o://398d8b4115f86cb01cfb2d60fb53e0cfcc25574d8a811f1e070d966a593d9d4c" gracePeriod=30 Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.333160 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-z4rxt"] Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.333472 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-z4rxt" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerName="registry-server" containerID="cri-o://8e8ede30a4d4a538163d26a90f46dc759aa32cb317538af95b24ccbfa64656c6" gracePeriod=30 Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.351964 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qd79z"] Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.353027 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.362346 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qd79z"] Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.442616 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/72d86b3f-e0da-4302-8327-90fc7eebdf64-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qd79z\" (UID: \"72d86b3f-e0da-4302-8327-90fc7eebdf64\") " pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.442704 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/72d86b3f-e0da-4302-8327-90fc7eebdf64-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qd79z\" (UID: \"72d86b3f-e0da-4302-8327-90fc7eebdf64\") " pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.442734 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq8hn\" (UniqueName: \"kubernetes.io/projected/72d86b3f-e0da-4302-8327-90fc7eebdf64-kube-api-access-zq8hn\") pod \"marketplace-operator-79b997595-qd79z\" (UID: \"72d86b3f-e0da-4302-8327-90fc7eebdf64\") " pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.493108 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerID="90c8c54be52c4184fdb1c277adff8d4d4a01a80f606aacb12d029f2c69754db9" exitCode=0 Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.493233 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79pqx" 
event={"ID":"7d4408c0-2666-4112-ba0d-c9427a29fc66","Type":"ContainerDied","Data":"90c8c54be52c4184fdb1c277adff8d4d4a01a80f606aacb12d029f2c69754db9"} Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.497311 4861 generic.go:334] "Generic (PLEG): container finished" podID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerID="8e8ede30a4d4a538163d26a90f46dc759aa32cb317538af95b24ccbfa64656c6" exitCode=0 Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.497377 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4rxt" event={"ID":"6ee375d9-c78e-4c98-a04f-c004903dbf12","Type":"ContainerDied","Data":"8e8ede30a4d4a538163d26a90f46dc759aa32cb317538af95b24ccbfa64656c6"} Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.503476 4861 generic.go:334] "Generic (PLEG): container finished" podID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" containerID="19f7178c968ec7212efe71f4d35a95ce9a3834a3bef62d87f6caa36247bf0060" exitCode=0 Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.503539 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" event={"ID":"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3","Type":"ContainerDied","Data":"19f7178c968ec7212efe71f4d35a95ce9a3834a3bef62d87f6caa36247bf0060"} Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.503583 4861 scope.go:117] "RemoveContainer" containerID="a2c61a1de90ac25dc67452730f0600283a9513bff2fe9648523a12ad0a19ec8d" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.510804 4861 generic.go:334] "Generic (PLEG): container finished" podID="840bff63-3c46-4e8d-baa2-32315812df45" containerID="053e49e4cdf2556eeb1448e3d207d941d25190edad7baf91d26d6cf6bf5884c3" exitCode=0 Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.510961 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b5hfw" event={"ID":"840bff63-3c46-4e8d-baa2-32315812df45","Type":"ContainerDied","Data":"053e49e4cdf2556eeb1448e3d207d941d25190edad7baf91d26d6cf6bf5884c3"} Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.513471 4861 generic.go:334] "Generic (PLEG): container finished" podID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" containerID="398d8b4115f86cb01cfb2d60fb53e0cfcc25574d8a811f1e070d966a593d9d4c" exitCode=0 Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.513525 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-whxwx" event={"ID":"77c754cb-8b6c-4c93-940e-fb17e49b51e6","Type":"ContainerDied","Data":"398d8b4115f86cb01cfb2d60fb53e0cfcc25574d8a811f1e070d966a593d9d4c"} Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.544239 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/72d86b3f-e0da-4302-8327-90fc7eebdf64-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qd79z\" (UID: \"72d86b3f-e0da-4302-8327-90fc7eebdf64\") " pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.544305 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/72d86b3f-e0da-4302-8327-90fc7eebdf64-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qd79z\" (UID: \"72d86b3f-e0da-4302-8327-90fc7eebdf64\") " pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:25 crc 
kubenswrapper[4861]: I0129 06:41:25.544333 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq8hn\" (UniqueName: \"kubernetes.io/projected/72d86b3f-e0da-4302-8327-90fc7eebdf64-kube-api-access-zq8hn\") pod \"marketplace-operator-79b997595-qd79z\" (UID: \"72d86b3f-e0da-4302-8327-90fc7eebdf64\") " pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.548521 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/72d86b3f-e0da-4302-8327-90fc7eebdf64-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qd79z\" (UID: \"72d86b3f-e0da-4302-8327-90fc7eebdf64\") " pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.553884 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/72d86b3f-e0da-4302-8327-90fc7eebdf64-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qd79z\" (UID: \"72d86b3f-e0da-4302-8327-90fc7eebdf64\") " pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.562514 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq8hn\" (UniqueName: \"kubernetes.io/projected/72d86b3f-e0da-4302-8327-90fc7eebdf64-kube-api-access-zq8hn\") pod \"marketplace-operator-79b997595-qd79z\" (UID: \"72d86b3f-e0da-4302-8327-90fc7eebdf64\") " pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.784344 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.793796 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.903326 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.911206 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b5hfw" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.933052 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.933284 4861 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.961967 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9ssd\" (UniqueName: \"kubernetes.io/projected/6ee375d9-c78e-4c98-a04f-c004903dbf12-kube-api-access-z9ssd\") pod \"6ee375d9-c78e-4c98-a04f-c004903dbf12\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") "
Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.962024 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-utilities\") pod \"6ee375d9-c78e-4c98-a04f-c004903dbf12\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") "
Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.962126 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-catalog-content\") pod \"6ee375d9-c78e-4c98-a04f-c004903dbf12\" (UID: \"6ee375d9-c78e-4c98-a04f-c004903dbf12\") "
Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.964644 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-utilities" (OuterVolumeSpecName: "utilities") pod "6ee375d9-c78e-4c98-a04f-c004903dbf12" (UID: "6ee375d9-c78e-4c98-a04f-c004903dbf12"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:41:25 crc kubenswrapper[4861]: I0129 06:41:25.974356 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ee375d9-c78e-4c98-a04f-c004903dbf12-kube-api-access-z9ssd" (OuterVolumeSpecName: "kube-api-access-z9ssd") pod "6ee375d9-c78e-4c98-a04f-c004903dbf12" (UID: "6ee375d9-c78e-4c98-a04f-c004903dbf12"). InnerVolumeSpecName "kube-api-access-z9ssd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.063799 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9c8m7\" (UniqueName: \"kubernetes.io/projected/840bff63-3c46-4e8d-baa2-32315812df45-kube-api-access-9c8m7\") pod \"840bff63-3c46-4e8d-baa2-32315812df45\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.063838 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72k8s\" (UniqueName: \"kubernetes.io/projected/77c754cb-8b6c-4c93-940e-fb17e49b51e6-kube-api-access-72k8s\") pod \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.063878 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-utilities\") pod \"7d4408c0-2666-4112-ba0d-c9427a29fc66\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.063927 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zlhd\" (UniqueName: \"kubernetes.io/projected/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-kube-api-access-5zlhd\") pod \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.063981 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-utilities\") pod \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.064023 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-operator-metrics\") pod \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.064045 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-utilities\") pod \"840bff63-3c46-4e8d-baa2-32315812df45\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.064104 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-catalog-content\") pod \"7d4408c0-2666-4112-ba0d-c9427a29fc66\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.064124 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-catalog-content\") pod \"840bff63-3c46-4e8d-baa2-32315812df45\" (UID: \"840bff63-3c46-4e8d-baa2-32315812df45\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.064148 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnktp\" (UniqueName: \"kubernetes.io/projected/7d4408c0-2666-4112-ba0d-c9427a29fc66-kube-api-access-rnktp\") pod \"7d4408c0-2666-4112-ba0d-c9427a29fc66\" (UID: \"7d4408c0-2666-4112-ba0d-c9427a29fc66\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.064181 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-trusted-ca\") pod \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\" (UID: \"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.064199 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-catalog-content\") pod \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\" (UID: \"77c754cb-8b6c-4c93-940e-fb17e49b51e6\") "
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.064438 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9ssd\" (UniqueName: \"kubernetes.io/projected/6ee375d9-c78e-4c98-a04f-c004903dbf12-kube-api-access-z9ssd\") on node \"crc\" DevicePath \"\""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.064451 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.065716 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-utilities" (OuterVolumeSpecName: "utilities") pod "7d4408c0-2666-4112-ba0d-c9427a29fc66" (UID: "7d4408c0-2666-4112-ba0d-c9427a29fc66"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.065765 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" (UID: "4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.066059 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-utilities" (OuterVolumeSpecName: "utilities") pod "840bff63-3c46-4e8d-baa2-32315812df45" (UID: "840bff63-3c46-4e8d-baa2-32315812df45"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.067992 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-utilities" (OuterVolumeSpecName: "utilities") pod "77c754cb-8b6c-4c93-940e-fb17e49b51e6" (UID: "77c754cb-8b6c-4c93-940e-fb17e49b51e6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.069333 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/840bff63-3c46-4e8d-baa2-32315812df45-kube-api-access-9c8m7" (OuterVolumeSpecName: "kube-api-access-9c8m7") pod "840bff63-3c46-4e8d-baa2-32315812df45" (UID: "840bff63-3c46-4e8d-baa2-32315812df45"). InnerVolumeSpecName "kube-api-access-9c8m7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.070572 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" (UID: "4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.070883 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d4408c0-2666-4112-ba0d-c9427a29fc66-kube-api-access-rnktp" (OuterVolumeSpecName: "kube-api-access-rnktp") pod "7d4408c0-2666-4112-ba0d-c9427a29fc66" (UID: "7d4408c0-2666-4112-ba0d-c9427a29fc66"). InnerVolumeSpecName "kube-api-access-rnktp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.072178 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77c754cb-8b6c-4c93-940e-fb17e49b51e6-kube-api-access-72k8s" (OuterVolumeSpecName: "kube-api-access-72k8s") pod "77c754cb-8b6c-4c93-940e-fb17e49b51e6" (UID: "77c754cb-8b6c-4c93-940e-fb17e49b51e6"). InnerVolumeSpecName "kube-api-access-72k8s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.074676 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-kube-api-access-5zlhd" (OuterVolumeSpecName: "kube-api-access-5zlhd") pod "4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" (UID: "4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3"). InnerVolumeSpecName "kube-api-access-5zlhd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.090210 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "77c754cb-8b6c-4c93-940e-fb17e49b51e6" (UID: "77c754cb-8b6c-4c93-940e-fb17e49b51e6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.122049 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "840bff63-3c46-4e8d-baa2-32315812df45" (UID: "840bff63-3c46-4e8d-baa2-32315812df45"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.130378 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7d4408c0-2666-4112-ba0d-c9427a29fc66" (UID: "7d4408c0-2666-4112-ba0d-c9427a29fc66"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.150202 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6ee375d9-c78e-4c98-a04f-c004903dbf12" (UID: "6ee375d9-c78e-4c98-a04f-c004903dbf12"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165684 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zlhd\" (UniqueName: \"kubernetes.io/projected/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-kube-api-access-5zlhd\") on node \"crc\" DevicePath \"\""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165727 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ee375d9-c78e-4c98-a04f-c004903dbf12-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165738 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165749 4861 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165760 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165773 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165785 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/840bff63-3c46-4e8d-baa2-32315812df45-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165800 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnktp\" (UniqueName: \"kubernetes.io/projected/7d4408c0-2666-4112-ba0d-c9427a29fc66-kube-api-access-rnktp\") on node \"crc\" DevicePath \"\""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165816 4861 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165824 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-catalog-content\") on node \"crc\" DevicePath \"\""
(UniqueName: \"kubernetes.io/empty-dir/77c754cb-8b6c-4c93-940e-fb17e49b51e6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165834 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9c8m7\" (UniqueName: \"kubernetes.io/projected/840bff63-3c46-4e8d-baa2-32315812df45-kube-api-access-9c8m7\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165843 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72k8s\" (UniqueName: \"kubernetes.io/projected/77c754cb-8b6c-4c93-940e-fb17e49b51e6-kube-api-access-72k8s\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.165852 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d4408c0-2666-4112-ba0d-c9427a29fc66-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.264916 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qd79z"] Jan 29 06:41:26 crc kubenswrapper[4861]: W0129 06:41:26.289951 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72d86b3f_e0da_4302_8327_90fc7eebdf64.slice/crio-92ddb67047c8ae880430f221a76924311e6cc157b54b2d8e1b2e787b18c3d64f WatchSource:0}: Error finding container 92ddb67047c8ae880430f221a76924311e6cc157b54b2d8e1b2e787b18c3d64f: Status 404 returned error can't find the container with id 92ddb67047c8ae880430f221a76924311e6cc157b54b2d8e1b2e787b18c3d64f Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.522314 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79pqx" event={"ID":"7d4408c0-2666-4112-ba0d-c9427a29fc66","Type":"ContainerDied","Data":"02d66eb19cdd90c1dafb5582c1484fe7a3f025c121652a052d3a9f6e13ceec85"} Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.522931 4861 scope.go:117] "RemoveContainer" containerID="90c8c54be52c4184fdb1c277adff8d4d4a01a80f606aacb12d029f2c69754db9" Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.522386 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-79pqx" Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.525397 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" event={"ID":"72d86b3f-e0da-4302-8327-90fc7eebdf64","Type":"ContainerStarted","Data":"397a08737fb11ca5a9d8e548fc7b6e83a13dec400572163001158b694e848f02"} Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.526198 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.526405 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" event={"ID":"72d86b3f-e0da-4302-8327-90fc7eebdf64","Type":"ContainerStarted","Data":"92ddb67047c8ae880430f221a76924311e6cc157b54b2d8e1b2e787b18c3d64f"} Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.528775 4861 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-qd79z container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.63:8080/healthz\": dial tcp 10.217.0.63:8080: connect: connection refused" start-of-body= Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.528885 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" podUID="72d86b3f-e0da-4302-8327-90fc7eebdf64" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.63:8080/healthz\": dial tcp 10.217.0.63:8080: connect: connection refused" Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.531517 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4rxt" event={"ID":"6ee375d9-c78e-4c98-a04f-c004903dbf12","Type":"ContainerDied","Data":"1f89b31f4f951a938be6850891200332be3272cab6cb70843d30bb49ef62c0c5"} Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.531837 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-z4rxt" Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.543599 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.543908 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7tpcj" event={"ID":"4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3","Type":"ContainerDied","Data":"724a1208bb7f50764d1a7b57ca02e98f8c7a2e41d5e5e52ff6a5d313c7e6901e"} Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.545261 4861 scope.go:117] "RemoveContainer" containerID="92c618353f925332b79e56974f98214c789477aabbf9d60eaa52cff716aed54b" Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.551213 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b5hfw" event={"ID":"840bff63-3c46-4e8d-baa2-32315812df45","Type":"ContainerDied","Data":"4099aef213edc2a757958af7ee5d9e7e9486646ec155b4dc13fecb7d9a48bf58"} Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.551370 4861 util.go:48] "No ready sandbox for pod can be found. 
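The failed readiness probe above (connection refused on /healthz, port 8080) fired before the freshly started marketplace-operator container began listening; the probe turns status="ready" about a second later. A hypothetical reconstruction in Go of the probe the kubelet is executing, using the real k8s.io/api types (the actual Deployment may set different timing fields, and this requires the k8s.io/api and k8s.io/apimachinery modules):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Path and port are taken from the probe failure in the log;
	// thresholds and periods are assumptions.
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/healthz",
				Port: intstr.FromInt(8080),
			},
		},
	}
	fmt.Println(probe.HTTPGet.Path, probe.HTTPGet.Port.IntValue())
}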
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.561733 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-qd79z" podStartSLOduration=1.5616702789999999 podStartE2EDuration="1.561670279s" podCreationTimestamp="2026-01-29 06:41:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:41:26.550555729 +0000 UTC m=+378.222050306" watchObservedRunningTime="2026-01-29 06:41:26.561670279 +0000 UTC m=+378.233164896"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.568348 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-whxwx" event={"ID":"77c754cb-8b6c-4c93-940e-fb17e49b51e6","Type":"ContainerDied","Data":"8330dc22f386d4c3fcb513c9ef416971b9193dc6658c07445413c47913f633cd"}
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.568397 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-whxwx"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.571133 4861 scope.go:117] "RemoveContainer" containerID="197c321c79ef1fbf0df338b6861071eec4006ec285d775b09dec1181b80d7db7"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.612832 4861 scope.go:117] "RemoveContainer" containerID="8e8ede30a4d4a538163d26a90f46dc759aa32cb317538af95b24ccbfa64656c6"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.632936 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-z4rxt"]
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.640485 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-z4rxt"]
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.641146 4861 scope.go:117] "RemoveContainer" containerID="8cfa40fc5e3178de37dca99ead2ffb6916847fdd8db9dcc6fc6752ab01298cb9"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.660068 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-79pqx"]
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.665543 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-79pqx"]
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.672223 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7tpcj"]
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.674397 4861 scope.go:117] "RemoveContainer" containerID="1f17e4f79ebf610072f06f1725d3c6805c6bfa238c85aedf1d6eec6fd2de6189"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.677408 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7tpcj"]
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.685259 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-whxwx"]
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.690276 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-whxwx"]
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.696443 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-b5hfw"]
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.699412 4861 scope.go:117] "RemoveContainer" containerID="19f7178c968ec7212efe71f4d35a95ce9a3834a3bef62d87f6caa36247bf0060"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.703820 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-b5hfw"]
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.733578 4861 scope.go:117] "RemoveContainer" containerID="053e49e4cdf2556eeb1448e3d207d941d25190edad7baf91d26d6cf6bf5884c3"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.753045 4861 scope.go:117] "RemoveContainer" containerID="f48abefc5f75182e4cdf18152fd284ad397fab3e6e5444144965d17064bebe2e"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.771832 4861 scope.go:117] "RemoveContainer" containerID="cd75d3b90bd93e3166e7cb14ec279d247afb5f53401fb999198b975610b288e0"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.801945 4861 scope.go:117] "RemoveContainer" containerID="398d8b4115f86cb01cfb2d60fb53e0cfcc25574d8a811f1e070d966a593d9d4c"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.816135 4861 scope.go:117] "RemoveContainer" containerID="84ea732c3b8c40dab8109f2bd63b075ad7f6e6e85e52d71d890a8fdfb617faed"
Jan 29 06:41:26 crc kubenswrapper[4861]: I0129 06:41:26.837601 4861 scope.go:117] "RemoveContainer" containerID="2cdab55f008f908014f320528609dd185e1599831f7c1d8fd05d449a5878af8f"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.125384 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" path="/var/lib/kubelet/pods/4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3/volumes"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.126299 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" path="/var/lib/kubelet/pods/6ee375d9-c78e-4c98-a04f-c004903dbf12/volumes"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.127141 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" path="/var/lib/kubelet/pods/77c754cb-8b6c-4c93-940e-fb17e49b51e6/volumes"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.128652 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" path="/var/lib/kubelet/pods/7d4408c0-2666-4112-ba0d-c9427a29fc66/volumes"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.129518 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="840bff63-3c46-4e8d-baa2-32315812df45" path="/var/lib/kubelet/pods/840bff63-3c46-4e8d-baa2-32315812df45/volumes"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529163 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-m5xpq"]
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529497 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" containerName="marketplace-operator"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529514 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" containerName="marketplace-operator"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529529 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" containerName="extract-content"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529537 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" containerName="extract-content"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529550 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerName="extract-utilities"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529558 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerName="extract-utilities"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529567 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529573 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529580 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerName="extract-content"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529586 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerName="extract-content"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529595 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="840bff63-3c46-4e8d-baa2-32315812df45" containerName="extract-utilities"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529601 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="840bff63-3c46-4e8d-baa2-32315812df45" containerName="extract-utilities"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529611 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529618 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529714 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" containerName="extract-utilities"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529721 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" containerName="extract-utilities"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529728 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="840bff63-3c46-4e8d-baa2-32315812df45" containerName="extract-content"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529734 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="840bff63-3c46-4e8d-baa2-32315812df45" containerName="extract-content"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529746 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="840bff63-3c46-4e8d-baa2-32315812df45" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529760 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="840bff63-3c46-4e8d-baa2-32315812df45" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529771 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerName="extract-content"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529778 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerName="extract-content"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529791 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerName="extract-utilities"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529798 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerName="extract-utilities"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.529807 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529815 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529917 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d4408c0-2666-4112-ba0d-c9427a29fc66" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529938 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" containerName="marketplace-operator"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529947 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="77c754cb-8b6c-4c93-940e-fb17e49b51e6" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529959 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ee375d9-c78e-4c98-a04f-c004903dbf12" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.529968 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="840bff63-3c46-4e8d-baa2-32315812df45" containerName="registry-server"
Jan 29 06:41:27 crc kubenswrapper[4861]: E0129 06:41:27.530057 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" containerName="marketplace-operator"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.530063 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" containerName="marketplace-operator"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.532984 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b6c4c81-2ddd-43f5-9ca1-573ada7abdc3" containerName="marketplace-operator"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.533794 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m5xpq"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.538121 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.542401 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m5xpq"]
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.604446 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-qd79z"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.692751 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ebc3092-6c7f-4514-956c-84cf246fec0e-catalog-content\") pod \"redhat-marketplace-m5xpq\" (UID: \"7ebc3092-6c7f-4514-956c-84cf246fec0e\") " pod="openshift-marketplace/redhat-marketplace-m5xpq"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.692824 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnl77\" (UniqueName: \"kubernetes.io/projected/7ebc3092-6c7f-4514-956c-84cf246fec0e-kube-api-access-wnl77\") pod \"redhat-marketplace-m5xpq\" (UID: \"7ebc3092-6c7f-4514-956c-84cf246fec0e\") " pod="openshift-marketplace/redhat-marketplace-m5xpq"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.693445 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ebc3092-6c7f-4514-956c-84cf246fec0e-utilities\") pod \"redhat-marketplace-m5xpq\" (UID: \"7ebc3092-6c7f-4514-956c-84cf246fec0e\") " pod="openshift-marketplace/redhat-marketplace-m5xpq"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.715761 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-g5x2t"]
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.717089 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.719596 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.724769 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g5x2t"]
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.795666 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnl77\" (UniqueName: \"kubernetes.io/projected/7ebc3092-6c7f-4514-956c-84cf246fec0e-kube-api-access-wnl77\") pod \"redhat-marketplace-m5xpq\" (UID: \"7ebc3092-6c7f-4514-956c-84cf246fec0e\") " pod="openshift-marketplace/redhat-marketplace-m5xpq"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.795792 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ebc3092-6c7f-4514-956c-84cf246fec0e-utilities\") pod \"redhat-marketplace-m5xpq\" (UID: \"7ebc3092-6c7f-4514-956c-84cf246fec0e\") " pod="openshift-marketplace/redhat-marketplace-m5xpq"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.795845 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ebc3092-6c7f-4514-956c-84cf246fec0e-catalog-content\") pod \"redhat-marketplace-m5xpq\" (UID: \"7ebc3092-6c7f-4514-956c-84cf246fec0e\") " pod="openshift-marketplace/redhat-marketplace-m5xpq"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.796572 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ebc3092-6c7f-4514-956c-84cf246fec0e-catalog-content\") pod \"redhat-marketplace-m5xpq\" (UID: \"7ebc3092-6c7f-4514-956c-84cf246fec0e\") " pod="openshift-marketplace/redhat-marketplace-m5xpq"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.796627 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ebc3092-6c7f-4514-956c-84cf246fec0e-utilities\") pod \"redhat-marketplace-m5xpq\" (UID: \"7ebc3092-6c7f-4514-956c-84cf246fec0e\") " pod="openshift-marketplace/redhat-marketplace-m5xpq"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.819380 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnl77\" (UniqueName: \"kubernetes.io/projected/7ebc3092-6c7f-4514-956c-84cf246fec0e-kube-api-access-wnl77\") pod \"redhat-marketplace-m5xpq\" (UID: \"7ebc3092-6c7f-4514-956c-84cf246fec0e\") " pod="openshift-marketplace/redhat-marketplace-m5xpq"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.897436 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m5xpq"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.897566 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-675rv\" (UniqueName: \"kubernetes.io/projected/b9207674-08ce-403f-a91d-e3d2649c8dde-kube-api-access-675rv\") pod \"redhat-operators-g5x2t\" (UID: \"b9207674-08ce-403f-a91d-e3d2649c8dde\") " pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.897655 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9207674-08ce-403f-a91d-e3d2649c8dde-catalog-content\") pod \"redhat-operators-g5x2t\" (UID: \"b9207674-08ce-403f-a91d-e3d2649c8dde\") " pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.897874 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9207674-08ce-403f-a91d-e3d2649c8dde-utilities\") pod \"redhat-operators-g5x2t\" (UID: \"b9207674-08ce-403f-a91d-e3d2649c8dde\") " pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.999638 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9207674-08ce-403f-a91d-e3d2649c8dde-catalog-content\") pod \"redhat-operators-g5x2t\" (UID: \"b9207674-08ce-403f-a91d-e3d2649c8dde\") " pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.999728 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9207674-08ce-403f-a91d-e3d2649c8dde-utilities\") pod \"redhat-operators-g5x2t\" (UID: \"b9207674-08ce-403f-a91d-e3d2649c8dde\") " pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:27 crc kubenswrapper[4861]: I0129 06:41:27.999893 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-675rv\" (UniqueName: \"kubernetes.io/projected/b9207674-08ce-403f-a91d-e3d2649c8dde-kube-api-access-675rv\") pod \"redhat-operators-g5x2t\" (UID: \"b9207674-08ce-403f-a91d-e3d2649c8dde\") " pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:28 crc kubenswrapper[4861]: I0129 06:41:28.000255 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9207674-08ce-403f-a91d-e3d2649c8dde-catalog-content\") pod \"redhat-operators-g5x2t\" (UID: \"b9207674-08ce-403f-a91d-e3d2649c8dde\") " pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:28 crc kubenswrapper[4861]: I0129 06:41:28.000551 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9207674-08ce-403f-a91d-e3d2649c8dde-utilities\") pod \"redhat-operators-g5x2t\" (UID: \"b9207674-08ce-403f-a91d-e3d2649c8dde\") " pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:28 crc kubenswrapper[4861]: I0129 06:41:28.021740 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-675rv\" (UniqueName: \"kubernetes.io/projected/b9207674-08ce-403f-a91d-e3d2649c8dde-kube-api-access-675rv\") pod \"redhat-operators-g5x2t\" (UID: \"b9207674-08ce-403f-a91d-e3d2649c8dde\") " pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:28 crc kubenswrapper[4861]: I0129 06:41:28.040723 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:28 crc kubenswrapper[4861]: I0129 06:41:28.330406 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m5xpq"]
Jan 29 06:41:28 crc kubenswrapper[4861]: I0129 06:41:28.437944 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g5x2t"]
Jan 29 06:41:28 crc kubenswrapper[4861]: W0129 06:41:28.445496 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9207674_08ce_403f_a91d_e3d2649c8dde.slice/crio-8e0ea6b35f285e77aa8153584f4ca192141536423ab7c70cb00acb65531303ab WatchSource:0}: Error finding container 8e0ea6b35f285e77aa8153584f4ca192141536423ab7c70cb00acb65531303ab: Status 404 returned error can't find the container with id 8e0ea6b35f285e77aa8153584f4ca192141536423ab7c70cb00acb65531303ab
Jan 29 06:41:28 crc kubenswrapper[4861]: I0129 06:41:28.601776 4861 generic.go:334] "Generic (PLEG): container finished" podID="7ebc3092-6c7f-4514-956c-84cf246fec0e" containerID="f74e39dfad5bc0ffb553600e87a69e249c5725bacbfe679c3f5ac81a2f06264a" exitCode=0
Jan 29 06:41:28 crc kubenswrapper[4861]: I0129 06:41:28.601858 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m5xpq" event={"ID":"7ebc3092-6c7f-4514-956c-84cf246fec0e","Type":"ContainerDied","Data":"f74e39dfad5bc0ffb553600e87a69e249c5725bacbfe679c3f5ac81a2f06264a"}
Jan 29 06:41:28 crc kubenswrapper[4861]: I0129 06:41:28.601896 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m5xpq" event={"ID":"7ebc3092-6c7f-4514-956c-84cf246fec0e","Type":"ContainerStarted","Data":"4a9cc34b829daf3fa302a0e1f29ea3a7c8e0122c0cab228dd5dd3e7fd111f474"}
Jan 29 06:41:28 crc kubenswrapper[4861]: I0129 06:41:28.604312 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g5x2t" event={"ID":"b9207674-08ce-403f-a91d-e3d2649c8dde","Type":"ContainerStarted","Data":"8e0ea6b35f285e77aa8153584f4ca192141536423ab7c70cb00acb65531303ab"}
Jan 29 06:41:29 crc kubenswrapper[4861]: I0129 06:41:29.612965 4861 generic.go:334] "Generic (PLEG): container finished" podID="b9207674-08ce-403f-a91d-e3d2649c8dde" containerID="c10220e0540a53030eb2d3d953bd15ebdddc349565c62d04da8c906142b1e5b2" exitCode=0
Jan 29 06:41:29 crc kubenswrapper[4861]: I0129 06:41:29.613095 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g5x2t" event={"ID":"b9207674-08ce-403f-a91d-e3d2649c8dde","Type":"ContainerDied","Data":"c10220e0540a53030eb2d3d953bd15ebdddc349565c62d04da8c906142b1e5b2"}
Jan 29 06:41:29 crc kubenswrapper[4861]: I0129 06:41:29.616489 4861 generic.go:334] "Generic (PLEG): container finished" podID="7ebc3092-6c7f-4514-956c-84cf246fec0e" containerID="3b9bfd2b4946322bbf20a5756fb434d26f67d2086d0c0226fadb9ffec9247860" exitCode=0
Jan 29 06:41:29 crc kubenswrapper[4861]: I0129 06:41:29.617337 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m5xpq" event={"ID":"7ebc3092-6c7f-4514-956c-84cf246fec0e","Type":"ContainerDied","Data":"3b9bfd2b4946322bbf20a5756fb434d26f67d2086d0c0226fadb9ffec9247860"}
Jan 29 06:41:29 crc kubenswrapper[4861]: I0129 06:41:29.915029 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qzrs9"]
Jan 29 06:41:29 crc kubenswrapper[4861]: I0129 06:41:29.916510 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:29 crc kubenswrapper[4861]: I0129 06:41:29.925946 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 29 06:41:29 crc kubenswrapper[4861]: I0129 06:41:29.945498 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qzrs9"]
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.028785 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7-utilities\") pod \"certified-operators-qzrs9\" (UID: \"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7\") " pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.029040 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2lqz\" (UniqueName: \"kubernetes.io/projected/2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7-kube-api-access-z2lqz\") pod \"certified-operators-qzrs9\" (UID: \"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7\") " pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.029231 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7-catalog-content\") pod \"certified-operators-qzrs9\" (UID: \"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7\") " pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.129150 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lttc5"]
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.131052 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7-catalog-content\") pod \"certified-operators-qzrs9\" (UID: \"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7\") " pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.131159 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7-utilities\") pod \"certified-operators-qzrs9\" (UID: \"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7\") " pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.131196 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2lqz\" (UniqueName: \"kubernetes.io/projected/2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7-kube-api-access-z2lqz\") pod \"certified-operators-qzrs9\" (UID: \"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7\") " pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.131821 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7-catalog-content\") pod \"certified-operators-qzrs9\" (UID: \"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7\") " pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.132047 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7-utilities\") pod \"certified-operators-qzrs9\" (UID: \"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7\") " pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.144755 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lttc5"]
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.144923 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lttc5"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.153661 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.159586 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2lqz\" (UniqueName: \"kubernetes.io/projected/2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7-kube-api-access-z2lqz\") pod \"certified-operators-qzrs9\" (UID: \"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7\") " pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.232729 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3b12c8-6e96-4fe6-9002-3adf3912a768-catalog-content\") pod \"community-operators-lttc5\" (UID: \"eb3b12c8-6e96-4fe6-9002-3adf3912a768\") " pod="openshift-marketplace/community-operators-lttc5"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.232784 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3b12c8-6e96-4fe6-9002-3adf3912a768-utilities\") pod \"community-operators-lttc5\" (UID: \"eb3b12c8-6e96-4fe6-9002-3adf3912a768\") " pod="openshift-marketplace/community-operators-lttc5"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.232821 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbc7r\" (UniqueName: \"kubernetes.io/projected/eb3b12c8-6e96-4fe6-9002-3adf3912a768-kube-api-access-qbc7r\") pod \"community-operators-lttc5\" (UID: \"eb3b12c8-6e96-4fe6-9002-3adf3912a768\") " pod="openshift-marketplace/community-operators-lttc5"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.244470 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.334492 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3b12c8-6e96-4fe6-9002-3adf3912a768-utilities\") pod \"community-operators-lttc5\" (UID: \"eb3b12c8-6e96-4fe6-9002-3adf3912a768\") " pod="openshift-marketplace/community-operators-lttc5"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.334548 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbc7r\" (UniqueName: \"kubernetes.io/projected/eb3b12c8-6e96-4fe6-9002-3adf3912a768-kube-api-access-qbc7r\") pod \"community-operators-lttc5\" (UID: \"eb3b12c8-6e96-4fe6-9002-3adf3912a768\") " pod="openshift-marketplace/community-operators-lttc5"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.334623 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3b12c8-6e96-4fe6-9002-3adf3912a768-catalog-content\") pod \"community-operators-lttc5\" (UID: \"eb3b12c8-6e96-4fe6-9002-3adf3912a768\") " pod="openshift-marketplace/community-operators-lttc5"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.335099 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3b12c8-6e96-4fe6-9002-3adf3912a768-catalog-content\") pod \"community-operators-lttc5\" (UID: \"eb3b12c8-6e96-4fe6-9002-3adf3912a768\") " pod="openshift-marketplace/community-operators-lttc5"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.335323 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3b12c8-6e96-4fe6-9002-3adf3912a768-utilities\") pod \"community-operators-lttc5\" (UID: \"eb3b12c8-6e96-4fe6-9002-3adf3912a768\") " pod="openshift-marketplace/community-operators-lttc5"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.363204 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbc7r\" (UniqueName: \"kubernetes.io/projected/eb3b12c8-6e96-4fe6-9002-3adf3912a768-kube-api-access-qbc7r\") pod \"community-operators-lttc5\" (UID: \"eb3b12c8-6e96-4fe6-9002-3adf3912a768\") " pod="openshift-marketplace/community-operators-lttc5"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.499425 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lttc5"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.626466 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g5x2t" event={"ID":"b9207674-08ce-403f-a91d-e3d2649c8dde","Type":"ContainerStarted","Data":"e4175f9b779e4900ceeb4ff70ba57e76bf1662d6c0fc9eac92cee48ae28cc311"}
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.629703 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.630459 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.635111 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m5xpq" event={"ID":"7ebc3092-6c7f-4514-956c-84cf246fec0e","Type":"ContainerStarted","Data":"9a3a762ff6aa71206f52abf60ea0dd81d4d3b92caf054d466f2440c0a6a6c8b8"}
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.667804 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-m5xpq" podStartSLOduration=1.9637119429999998 podStartE2EDuration="3.66777944s" podCreationTimestamp="2026-01-29 06:41:27 +0000 UTC" firstStartedPulling="2026-01-29 06:41:28.604464138 +0000 UTC m=+380.275958695" lastFinishedPulling="2026-01-29 06:41:30.308531635 +0000 UTC m=+381.980026192" observedRunningTime="2026-01-29 06:41:30.6625763 +0000 UTC m=+382.334070857" watchObservedRunningTime="2026-01-29 06:41:30.66777944 +0000 UTC m=+382.339273997"
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.703378 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qzrs9"]
Jan 29 06:41:30 crc kubenswrapper[4861]: I0129 06:41:30.980908 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lttc5"]
Jan 29 06:41:31 crc kubenswrapper[4861]: I0129 06:41:31.643004 4861 generic.go:334] "Generic (PLEG): container finished" podID="eb3b12c8-6e96-4fe6-9002-3adf3912a768" containerID="b62ff1ebd71925b2f2d635e6979c05a05eff140ac92c6b63432e08301556bbde" exitCode=0
Jan 29 06:41:31 crc kubenswrapper[4861]: I0129 06:41:31.643418 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lttc5" event={"ID":"eb3b12c8-6e96-4fe6-9002-3adf3912a768","Type":"ContainerDied","Data":"b62ff1ebd71925b2f2d635e6979c05a05eff140ac92c6b63432e08301556bbde"}
Jan 29 06:41:31 crc kubenswrapper[4861]: I0129 06:41:31.643464 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lttc5" event={"ID":"eb3b12c8-6e96-4fe6-9002-3adf3912a768","Type":"ContainerStarted","Data":"befae8ce063778fa8a432b689f3f2d5c966d8b5b014e745c921679ed27adcfa2"}
Jan 29 06:41:31 crc kubenswrapper[4861]: I0129 06:41:31.647180 4861 generic.go:334] "Generic (PLEG): container finished" podID="2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7" containerID="d4bbb94183ea864a82fed9922863697919f22061023248d12f97f675beeab844" exitCode=0
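The pod_startup_latency_tracker.go:104 record above encodes two durations: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration additionally subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling). Recomputing both in Go from the timestamps in the redhat-marketplace-m5xpq record:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the redhat-marketplace-m5xpq latency record.
	parse := func(s string) time.Time {
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-01-29 06:41:27 +0000 UTC")
	firstPull := parse("2026-01-29 06:41:28.604464138 +0000 UTC")
	lastPull := parse("2026-01-29 06:41:30.308531635 +0000 UTC")
	watchObserved := parse("2026-01-29 06:41:30.66777944 +0000 UTC")

	e2e := watchObserved.Sub(created)
	slo := e2e - lastPull.Sub(firstPull) // SLO duration excludes image pull time
	fmt.Println(e2e, slo)                // 3.66777944s 1.963711943s
}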
containerID="d4bbb94183ea864a82fed9922863697919f22061023248d12f97f675beeab844" exitCode=0 Jan 29 06:41:31 crc kubenswrapper[4861]: I0129 06:41:31.647286 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzrs9" event={"ID":"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7","Type":"ContainerDied","Data":"d4bbb94183ea864a82fed9922863697919f22061023248d12f97f675beeab844"} Jan 29 06:41:31 crc kubenswrapper[4861]: I0129 06:41:31.647329 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzrs9" event={"ID":"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7","Type":"ContainerStarted","Data":"288a4ca5d9a45e74a1803a2e2adb0d525af6df5b08246aabfb900db828434ac4"} Jan 29 06:41:31 crc kubenswrapper[4861]: I0129 06:41:31.651838 4861 generic.go:334] "Generic (PLEG): container finished" podID="b9207674-08ce-403f-a91d-e3d2649c8dde" containerID="e4175f9b779e4900ceeb4ff70ba57e76bf1662d6c0fc9eac92cee48ae28cc311" exitCode=0 Jan 29 06:41:31 crc kubenswrapper[4861]: I0129 06:41:31.652724 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g5x2t" event={"ID":"b9207674-08ce-403f-a91d-e3d2649c8dde","Type":"ContainerDied","Data":"e4175f9b779e4900ceeb4ff70ba57e76bf1662d6c0fc9eac92cee48ae28cc311"} Jan 29 06:41:32 crc kubenswrapper[4861]: I0129 06:41:32.663285 4861 generic.go:334] "Generic (PLEG): container finished" podID="2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7" containerID="1ebe81409791be051b43b2d8a1769f55268ea7b04a21549f37529a3a526f54b3" exitCode=0 Jan 29 06:41:32 crc kubenswrapper[4861]: I0129 06:41:32.663358 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzrs9" event={"ID":"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7","Type":"ContainerDied","Data":"1ebe81409791be051b43b2d8a1769f55268ea7b04a21549f37529a3a526f54b3"} Jan 29 06:41:32 crc kubenswrapper[4861]: I0129 06:41:32.671929 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g5x2t" event={"ID":"b9207674-08ce-403f-a91d-e3d2649c8dde","Type":"ContainerStarted","Data":"58f4d44dc7efd5cc4d91e29d40111d9e55e8a46beaa7657e54e7712bc8cdb066"} Jan 29 06:41:32 crc kubenswrapper[4861]: I0129 06:41:32.743617 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-g5x2t" podStartSLOduration=3.295066891 podStartE2EDuration="5.74358674s" podCreationTimestamp="2026-01-29 06:41:27 +0000 UTC" firstStartedPulling="2026-01-29 06:41:29.614527146 +0000 UTC m=+381.286021703" lastFinishedPulling="2026-01-29 06:41:32.063046995 +0000 UTC m=+383.734541552" observedRunningTime="2026-01-29 06:41:32.742935093 +0000 UTC m=+384.414429650" watchObservedRunningTime="2026-01-29 06:41:32.74358674 +0000 UTC m=+384.415081327" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.015665 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" podUID="c66ab458-ce20-4c27-99d5-e328b6397bd4" containerName="registry" containerID="cri-o://8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781" gracePeriod=30 Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.548192 4861 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.548192 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn"
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.678895 4861 generic.go:334] "Generic (PLEG): container finished" podID="eb3b12c8-6e96-4fe6-9002-3adf3912a768" containerID="5b1ee237c5385dc7c064c01305117da44f1de04b5970aa0e7aac5788f298b97d" exitCode=0
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.678977 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lttc5" event={"ID":"eb3b12c8-6e96-4fe6-9002-3adf3912a768","Type":"ContainerDied","Data":"5b1ee237c5385dc7c064c01305117da44f1de04b5970aa0e7aac5788f298b97d"}
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.683342 4861 generic.go:334] "Generic (PLEG): container finished" podID="c66ab458-ce20-4c27-99d5-e328b6397bd4" containerID="8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781" exitCode=0
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.683388 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn"
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.683433 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" event={"ID":"c66ab458-ce20-4c27-99d5-e328b6397bd4","Type":"ContainerDied","Data":"8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781"}
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.683484 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-v4kqn" event={"ID":"c66ab458-ce20-4c27-99d5-e328b6397bd4","Type":"ContainerDied","Data":"3f0ac800c801eff6587047f590baf5296e1ac5756659a8ea23c6d8257a461c2c"}
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.683510 4861 scope.go:117] "RemoveContainer" containerID="8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781"
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.694359 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzrs9" event={"ID":"2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7","Type":"ContainerStarted","Data":"f0bd35e0452b940722c8b1f137ab42ca4abb7696c236e76622ff2c854e915360"}
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.710795 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c66ab458-ce20-4c27-99d5-e328b6397bd4-installation-pull-secrets\") pod \"c66ab458-ce20-4c27-99d5-e328b6397bd4\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") "
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.710915 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qt68k\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-kube-api-access-qt68k\") pod \"c66ab458-ce20-4c27-99d5-e328b6397bd4\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") "
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.710948 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-trusted-ca\") pod \"c66ab458-ce20-4c27-99d5-e328b6397bd4\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") "
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.711028 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c66ab458-ce20-4c27-99d5-e328b6397bd4-ca-trust-extracted\") pod \"c66ab458-ce20-4c27-99d5-e328b6397bd4\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") "
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.711107 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-tls\") pod \"c66ab458-ce20-4c27-99d5-e328b6397bd4\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") "
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.711418 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"c66ab458-ce20-4c27-99d5-e328b6397bd4\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") "
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.711483 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-bound-sa-token\") pod \"c66ab458-ce20-4c27-99d5-e328b6397bd4\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") "
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.711504 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-certificates\") pod \"c66ab458-ce20-4c27-99d5-e328b6397bd4\" (UID: \"c66ab458-ce20-4c27-99d5-e328b6397bd4\") "
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.713283 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "c66ab458-ce20-4c27-99d5-e328b6397bd4" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.715392 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "c66ab458-ce20-4c27-99d5-e328b6397bd4" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.720767 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-kube-api-access-qt68k" (OuterVolumeSpecName: "kube-api-access-qt68k") pod "c66ab458-ce20-4c27-99d5-e328b6397bd4" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4"). InnerVolumeSpecName "kube-api-access-qt68k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.721162 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "c66ab458-ce20-4c27-99d5-e328b6397bd4" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.721210 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c66ab458-ce20-4c27-99d5-e328b6397bd4-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "c66ab458-ce20-4c27-99d5-e328b6397bd4" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.724540 4861 scope.go:117] "RemoveContainer" containerID="8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781" Jan 29 06:41:33 crc kubenswrapper[4861]: E0129 06:41:33.725672 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781\": container with ID starting with 8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781 not found: ID does not exist" containerID="8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.725811 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781"} err="failed to get container status \"8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781\": rpc error: code = NotFound desc = could not find container \"8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781\": container with ID starting with 8021e2b85f8e5fb6e94c83d6770b620417da6e5c3eb864f22aacf53e906ac781 not found: ID does not exist" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.726028 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "c66ab458-ce20-4c27-99d5-e328b6397bd4" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.727912 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "c66ab458-ce20-4c27-99d5-e328b6397bd4" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.731404 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c66ab458-ce20-4c27-99d5-e328b6397bd4-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "c66ab458-ce20-4c27-99d5-e328b6397bd4" (UID: "c66ab458-ce20-4c27-99d5-e328b6397bd4"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.737646 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qzrs9" podStartSLOduration=3.210644398 podStartE2EDuration="4.737619506s" podCreationTimestamp="2026-01-29 06:41:29 +0000 UTC" firstStartedPulling="2026-01-29 06:41:31.649168055 +0000 UTC m=+383.320662612" lastFinishedPulling="2026-01-29 06:41:33.176143163 +0000 UTC m=+384.847637720" observedRunningTime="2026-01-29 06:41:33.73368921 +0000 UTC m=+385.405183787" watchObservedRunningTime="2026-01-29 06:41:33.737619506 +0000 UTC m=+385.409114073" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.812912 4861 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.812955 4861 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.812968 4861 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c66ab458-ce20-4c27-99d5-e328b6397bd4-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.812979 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qt68k\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-kube-api-access-qt68k\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.812995 4861 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c66ab458-ce20-4c27-99d5-e328b6397bd4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.813005 4861 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c66ab458-ce20-4c27-99d5-e328b6397bd4-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:33 crc kubenswrapper[4861]: I0129 06:41:33.813016 4861 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c66ab458-ce20-4c27-99d5-e328b6397bd4-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 29 06:41:34 crc kubenswrapper[4861]: I0129 06:41:34.016816 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v4kqn"] Jan 29 06:41:34 crc kubenswrapper[4861]: I0129 06:41:34.023995 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-v4kqn"] Jan 29 06:41:34 crc kubenswrapper[4861]: I0129 06:41:34.703489 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lttc5" event={"ID":"eb3b12c8-6e96-4fe6-9002-3adf3912a768","Type":"ContainerStarted","Data":"d4f8c1d0505d981a2799e0a6e51bc4b52371f047052ce39ac3200c196022d0a5"} Jan 29 06:41:34 crc kubenswrapper[4861]: I0129 06:41:34.731629 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lttc5" podStartSLOduration=2.261307735 podStartE2EDuration="4.73159438s" 
podCreationTimestamp="2026-01-29 06:41:30 +0000 UTC" firstStartedPulling="2026-01-29 06:41:31.645875946 +0000 UTC m=+383.317370523" lastFinishedPulling="2026-01-29 06:41:34.116162611 +0000 UTC m=+385.787657168" observedRunningTime="2026-01-29 06:41:34.725118345 +0000 UTC m=+386.396612892" watchObservedRunningTime="2026-01-29 06:41:34.73159438 +0000 UTC m=+386.403088937" Jan 29 06:41:35 crc kubenswrapper[4861]: I0129 06:41:35.131213 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c66ab458-ce20-4c27-99d5-e328b6397bd4" path="/var/lib/kubelet/pods/c66ab458-ce20-4c27-99d5-e328b6397bd4/volumes" Jan 29 06:41:37 crc kubenswrapper[4861]: I0129 06:41:37.898049 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-m5xpq" Jan 29 06:41:37 crc kubenswrapper[4861]: I0129 06:41:37.898849 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-m5xpq" Jan 29 06:41:37 crc kubenswrapper[4861]: I0129 06:41:37.947233 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-m5xpq" Jan 29 06:41:38 crc kubenswrapper[4861]: I0129 06:41:38.042128 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-g5x2t" Jan 29 06:41:38 crc kubenswrapper[4861]: I0129 06:41:38.042203 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-g5x2t" Jan 29 06:41:38 crc kubenswrapper[4861]: I0129 06:41:38.800408 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-m5xpq" Jan 29 06:41:39 crc kubenswrapper[4861]: I0129 06:41:39.107705 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g5x2t" podUID="b9207674-08ce-403f-a91d-e3d2649c8dde" containerName="registry-server" probeResult="failure" output=< Jan 29 06:41:39 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 06:41:39 crc kubenswrapper[4861]: > Jan 29 06:41:40 crc kubenswrapper[4861]: I0129 06:41:40.245116 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qzrs9" Jan 29 06:41:40 crc kubenswrapper[4861]: I0129 06:41:40.245193 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qzrs9" Jan 29 06:41:40 crc kubenswrapper[4861]: I0129 06:41:40.310431 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qzrs9" Jan 29 06:41:40 crc kubenswrapper[4861]: I0129 06:41:40.500949 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lttc5" Jan 29 06:41:40 crc kubenswrapper[4861]: I0129 06:41:40.501402 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lttc5" Jan 29 06:41:40 crc kubenswrapper[4861]: I0129 06:41:40.562312 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lttc5" Jan 29 06:41:40 crc kubenswrapper[4861]: I0129 06:41:40.795245 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lttc5" Jan 29 06:41:40 crc kubenswrapper[4861]: I0129 
Jan 29 06:41:40 crc kubenswrapper[4861]: I0129 06:41:40.815508 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qzrs9"
Jan 29 06:41:48 crc kubenswrapper[4861]: I0129 06:41:48.099603 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:41:48 crc kubenswrapper[4861]: I0129 06:41:48.149927 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-g5x2t"
Jan 29 06:42:00 crc kubenswrapper[4861]: I0129 06:42:00.629539 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 06:42:00 crc kubenswrapper[4861]: I0129 06:42:00.629998 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 06:42:00 crc kubenswrapper[4861]: I0129 06:42:00.630069 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 06:42:00 crc kubenswrapper[4861]: I0129 06:42:00.631274 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"36705bd8ad43bcb9108d63a8d54cd293b1f2d38e6068c9bf489c044dc50abf8d"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 06:42:00 crc kubenswrapper[4861]: I0129 06:42:00.631431 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://36705bd8ad43bcb9108d63a8d54cd293b1f2d38e6068c9bf489c044dc50abf8d" gracePeriod=600
Jan 29 06:42:00 crc kubenswrapper[4861]: I0129 06:42:00.884773 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"36705bd8ad43bcb9108d63a8d54cd293b1f2d38e6068c9bf489c044dc50abf8d"}
Jan 29 06:42:00 crc kubenswrapper[4861]: I0129 06:42:00.884933 4861 scope.go:117] "RemoveContainer" containerID="2bd17782a4029d73bed6fbec0ce6ee3e6ac0268c76744fc8da1adc7631a16bd3"
Jan 29 06:42:00 crc kubenswrapper[4861]: I0129 06:42:00.885764 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="36705bd8ad43bcb9108d63a8d54cd293b1f2d38e6068c9bf489c044dc50abf8d" exitCode=0
Jan 29 06:42:01 crc kubenswrapper[4861]: I0129 06:42:01.897223 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"0d12901ad2c4d552214db203d9884d3b0f3d1a351d3daee275dcc60b3d88c93d"}
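
The 06:42:00 block is a complete liveness-restart cycle for machine-config-daemon: the HTTP probe to 127.0.0.1:8798/health is refused, the kubelet records "will be restarted", kills the container with the pod's 600s grace period, PLEG reports ContainerDied, the previous dead instance is garbage-collected (RemoveContainer), and a fresh container starts a second later. A sketch of the probe these failures imply (Go client types; host, port, and path from the log, period consistent with the 30s failure cadence seen later at 06:44:00/06:44:30/06:45:00, threshold assumed):

    package main

    import (
    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
    	_ = corev1.Probe{
    		ProbeHandler: corev1.ProbeHandler{
    			HTTPGet: &corev1.HTTPGetAction{
    				Host: "127.0.0.1", // probed over localhost on the host network
    				Port: intstr.FromInt(8798),
    				Path: "/health",
    			},
    		},
    		PeriodSeconds:    30, // consistent with the logged failure cadence
    		FailureThreshold: 3,  // assumed; not derivable from the log alone
    	}
    }
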
Jan 29 06:44:00 crc kubenswrapper[4861]: I0129 06:44:00.630474 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 06:44:00 crc kubenswrapper[4861]: I0129 06:44:00.631277 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 06:44:30 crc kubenswrapper[4861]: I0129 06:44:30.629596 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 06:44:30 crc kubenswrapper[4861]: I0129 06:44:30.631574 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.213485 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"]
Jan 29 06:45:00 crc kubenswrapper[4861]: E0129 06:45:00.214461 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c66ab458-ce20-4c27-99d5-e328b6397bd4" containerName="registry"
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.214483 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c66ab458-ce20-4c27-99d5-e328b6397bd4" containerName="registry"
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.214626 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c66ab458-ce20-4c27-99d5-e328b6397bd4" containerName="registry"
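
collect-profiles-29494485-42mq5 is a pod from OLM's collect-profiles CronJob. The numeric suffix the CronJob controller appends to each Job is the scheduled time in minutes since the Unix epoch, which is why the pod is ADDed at exactly 06:45:00: 29494485 min * 60 s/min = 1769669100 s, i.e. 2026-01-29 06:45:00 UTC. A one-liner to check:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Suffix copied from the Job name in the log: collect-profiles-29494485-...
    	fmt.Println(time.Unix(29494485*60, 0).UTC()) // 2026-01-29 06:45:00 +0000 UTC
    }
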
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.215068 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.219456 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.219809 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.235173 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"]
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.325307 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5bx9\" (UniqueName: \"kubernetes.io/projected/718f749b-e564-48bf-8df8-8d3070a0fb53-kube-api-access-b5bx9\") pod \"collect-profiles-29494485-42mq5\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.325375 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/718f749b-e564-48bf-8df8-8d3070a0fb53-secret-volume\") pod \"collect-profiles-29494485-42mq5\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.325399 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/718f749b-e564-48bf-8df8-8d3070a0fb53-config-volume\") pod \"collect-profiles-29494485-42mq5\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.426380 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5bx9\" (UniqueName: \"kubernetes.io/projected/718f749b-e564-48bf-8df8-8d3070a0fb53-kube-api-access-b5bx9\") pod \"collect-profiles-29494485-42mq5\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.426876 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/718f749b-e564-48bf-8df8-8d3070a0fb53-secret-volume\") pod \"collect-profiles-29494485-42mq5\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"
Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.426917 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/718f749b-e564-48bf-8df8-8d3070a0fb53-config-volume\") pod \"collect-profiles-29494485-42mq5\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"
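
The volume records walk through the reconciler's stages in order: VerifyControllerAttachedVolume (confirm the volume is attached in the actual state of the world), MountVolume started, then MountVolume.SetUp succeeded. The kube-api-access-b5bx9 volume is the kubelet-injected projected volume that composes the bound service-account token with the cluster CA and namespace. A sketch of what such a volume contains (Go client types; names mirror the log, exact projection items are an assumption based on the standard shape of these volumes):

    package main

    import corev1 "k8s.io/api/core/v1"

    func main() {
    	expiry := int64(3607)
    	_ = corev1.Volume{
    		Name: "kube-api-access-b5bx9",
    		VolumeSource: corev1.VolumeSource{
    			Projected: &corev1.ProjectedVolumeSource{
    				Sources: []corev1.VolumeProjection{
    					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
    						Path:              "token",
    						ExpirationSeconds: &expiry,
    					}},
    					{ConfigMap: &corev1.ConfigMapProjection{
    						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
    						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
    					}},
    				},
    			},
    		},
    	}
    }
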
\"collect-profiles-29494485-42mq5\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5" Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.436572 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/718f749b-e564-48bf-8df8-8d3070a0fb53-secret-volume\") pod \"collect-profiles-29494485-42mq5\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5" Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.448591 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5bx9\" (UniqueName: \"kubernetes.io/projected/718f749b-e564-48bf-8df8-8d3070a0fb53-kube-api-access-b5bx9\") pod \"collect-profiles-29494485-42mq5\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5" Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.572960 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5" Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.629664 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.629752 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.629819 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.630609 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0d12901ad2c4d552214db203d9884d3b0f3d1a351d3daee275dcc60b3d88c93d"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.630673 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://0d12901ad2c4d552214db203d9884d3b0f3d1a351d3daee275dcc60b3d88c93d" gracePeriod=600 Jan 29 06:45:00 crc kubenswrapper[4861]: I0129 06:45:00.835704 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"] Jan 29 06:45:00 crc kubenswrapper[4861]: W0129 06:45:00.861299 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod718f749b_e564_48bf_8df8_8d3070a0fb53.slice/crio-f24e0e55f0bea82df53e47d87c18d978db8fadf96972ec7ae9f210f47f8cdb45 WatchSource:0}: Error finding container 
f24e0e55f0bea82df53e47d87c18d978db8fadf96972ec7ae9f210f47f8cdb45: Status 404 returned error can't find the container with id f24e0e55f0bea82df53e47d87c18d978db8fadf96972ec7ae9f210f47f8cdb45 Jan 29 06:45:01 crc kubenswrapper[4861]: I0129 06:45:01.496954 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="0d12901ad2c4d552214db203d9884d3b0f3d1a351d3daee275dcc60b3d88c93d" exitCode=0 Jan 29 06:45:01 crc kubenswrapper[4861]: I0129 06:45:01.497063 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"0d12901ad2c4d552214db203d9884d3b0f3d1a351d3daee275dcc60b3d88c93d"} Jan 29 06:45:01 crc kubenswrapper[4861]: I0129 06:45:01.497518 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"7f4deb5daa5740edb0e7467d9109be2012b4d8eeb7a5057275a40485d2be7713"} Jan 29 06:45:01 crc kubenswrapper[4861]: I0129 06:45:01.497546 4861 scope.go:117] "RemoveContainer" containerID="36705bd8ad43bcb9108d63a8d54cd293b1f2d38e6068c9bf489c044dc50abf8d" Jan 29 06:45:01 crc kubenswrapper[4861]: I0129 06:45:01.499209 4861 generic.go:334] "Generic (PLEG): container finished" podID="718f749b-e564-48bf-8df8-8d3070a0fb53" containerID="54dc8617377b933b87ae72bc3453242a94358ec959f01d1e1ce0d668620102bb" exitCode=0 Jan 29 06:45:01 crc kubenswrapper[4861]: I0129 06:45:01.499261 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5" event={"ID":"718f749b-e564-48bf-8df8-8d3070a0fb53","Type":"ContainerDied","Data":"54dc8617377b933b87ae72bc3453242a94358ec959f01d1e1ce0d668620102bb"} Jan 29 06:45:01 crc kubenswrapper[4861]: I0129 06:45:01.499306 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5" event={"ID":"718f749b-e564-48bf-8df8-8d3070a0fb53","Type":"ContainerStarted","Data":"f24e0e55f0bea82df53e47d87c18d978db8fadf96972ec7ae9f210f47f8cdb45"} Jan 29 06:45:02 crc kubenswrapper[4861]: I0129 06:45:02.865522 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5" Jan 29 06:45:02 crc kubenswrapper[4861]: I0129 06:45:02.960102 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5bx9\" (UniqueName: \"kubernetes.io/projected/718f749b-e564-48bf-8df8-8d3070a0fb53-kube-api-access-b5bx9\") pod \"718f749b-e564-48bf-8df8-8d3070a0fb53\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " Jan 29 06:45:02 crc kubenswrapper[4861]: I0129 06:45:02.960277 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/718f749b-e564-48bf-8df8-8d3070a0fb53-secret-volume\") pod \"718f749b-e564-48bf-8df8-8d3070a0fb53\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " Jan 29 06:45:02 crc kubenswrapper[4861]: I0129 06:45:02.960359 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/718f749b-e564-48bf-8df8-8d3070a0fb53-config-volume\") pod \"718f749b-e564-48bf-8df8-8d3070a0fb53\" (UID: \"718f749b-e564-48bf-8df8-8d3070a0fb53\") " Jan 29 06:45:02 crc kubenswrapper[4861]: I0129 06:45:02.961600 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/718f749b-e564-48bf-8df8-8d3070a0fb53-config-volume" (OuterVolumeSpecName: "config-volume") pod "718f749b-e564-48bf-8df8-8d3070a0fb53" (UID: "718f749b-e564-48bf-8df8-8d3070a0fb53"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:45:02 crc kubenswrapper[4861]: I0129 06:45:02.966961 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/718f749b-e564-48bf-8df8-8d3070a0fb53-kube-api-access-b5bx9" (OuterVolumeSpecName: "kube-api-access-b5bx9") pod "718f749b-e564-48bf-8df8-8d3070a0fb53" (UID: "718f749b-e564-48bf-8df8-8d3070a0fb53"). InnerVolumeSpecName "kube-api-access-b5bx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:45:02 crc kubenswrapper[4861]: I0129 06:45:02.968192 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718f749b-e564-48bf-8df8-8d3070a0fb53-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "718f749b-e564-48bf-8df8-8d3070a0fb53" (UID: "718f749b-e564-48bf-8df8-8d3070a0fb53"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:45:03 crc kubenswrapper[4861]: I0129 06:45:03.061659 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/718f749b-e564-48bf-8df8-8d3070a0fb53-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 06:45:03 crc kubenswrapper[4861]: I0129 06:45:03.061717 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/718f749b-e564-48bf-8df8-8d3070a0fb53-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 06:45:03 crc kubenswrapper[4861]: I0129 06:45:03.061744 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5bx9\" (UniqueName: \"kubernetes.io/projected/718f749b-e564-48bf-8df8-8d3070a0fb53-kube-api-access-b5bx9\") on node \"crc\" DevicePath \"\"" Jan 29 06:45:03 crc kubenswrapper[4861]: I0129 06:45:03.522198 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5" event={"ID":"718f749b-e564-48bf-8df8-8d3070a0fb53","Type":"ContainerDied","Data":"f24e0e55f0bea82df53e47d87c18d978db8fadf96972ec7ae9f210f47f8cdb45"} Jan 29 06:45:03 crc kubenswrapper[4861]: I0129 06:45:03.522276 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5" Jan 29 06:45:03 crc kubenswrapper[4861]: I0129 06:45:03.522277 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f24e0e55f0bea82df53e47d87c18d978db8fadf96972ec7ae9f210f47f8cdb45" Jan 29 06:45:09 crc kubenswrapper[4861]: I0129 06:45:09.406472 4861 scope.go:117] "RemoveContainer" containerID="a96fc85349ef18db88384fa595ca86e8ca949d305c36b2be39c2dd8ad3532a46" Jan 29 06:47:00 crc kubenswrapper[4861]: I0129 06:47:00.630571 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:47:00 crc kubenswrapper[4861]: I0129 06:47:00.631063 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.557600 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5xdwl"] Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.559049 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovn-controller" containerID="cri-o://8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb" gracePeriod=30 Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.559127 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="nbdb" containerID="cri-o://2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2" gracePeriod=30 Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.559255 4861 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="northd" containerID="cri-o://cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e" gracePeriod=30 Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.559320 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0" gracePeriod=30 Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.559375 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="kube-rbac-proxy-node" containerID="cri-o://4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a" gracePeriod=30 Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.559433 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovn-acl-logging" containerID="cri-o://cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5" gracePeriod=30 Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.559554 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="sbdb" containerID="cri-o://de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef" gracePeriod=30 Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.654398 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" containerID="cri-o://39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3" gracePeriod=30 Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.966527 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/3.log" Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.970362 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovn-acl-logging/0.log" Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.971382 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovn-controller/0.log" Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.972110 4861 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 06:47:15 crc kubenswrapper[4861]: I0129 06:47:15.972110 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.027766 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-config\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") "
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.027880 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-systemd-units\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") "
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.027918 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-netns\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") "
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.027944 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-slash\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") "
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.027979 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c6ece014-5432-4877-9449-4253d6124c73-ovn-node-metrics-cert\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") "
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.027999 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028012 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-var-lib-openvswitch\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") "
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028094 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
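
Nearly all of the ovnkube-node volumes being torn down here are hostPath mounts into the node filesystem; for those, UnmountVolume.TearDown is pure bookkeeping (nothing on disk is unmounted or deleted), which is why dozens of them complete within the same few milliseconds. A sketch of one such volume (Go client types; the host path itself is an assumption based on the volume name, since the log only records volume names):

    package main

    import corev1 "k8s.io/api/core/v1"

    func main() {
    	_ = corev1.Volume{
    		Name: "var-lib-openvswitch",
    		VolumeSource: corev1.VolumeSource{
    			HostPath: &corev1.HostPathVolumeSource{
    				Path: "/var/lib/openvswitch", // assumed; not in the log
    			},
    		},
    	}
    }
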
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028117 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpcvl\" (UniqueName: \"kubernetes.io/projected/c6ece014-5432-4877-9449-4253d6124c73-kube-api-access-jpcvl\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028148 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-openvswitch\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028136 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-slash" (OuterVolumeSpecName: "host-slash") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028040 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028220 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028167 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-etc-openvswitch\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028184 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028298 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-node-log\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028383 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-node-log" (OuterVolumeSpecName: "node-log") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028396 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-kubelet\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028425 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028458 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-bin\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028515 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-var-lib-cni-networks-ovn-kubernetes\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028551 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-ovn\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028610 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-script-lib\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028608 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028629 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028652 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-systemd\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028673 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028698 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-netd\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028730 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-env-overrides\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028761 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-ovn-kubernetes\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028788 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-log-socket\") pod \"c6ece014-5432-4877-9449-4253d6124c73\" (UID: \"c6ece014-5432-4877-9449-4253d6124c73\") " Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028888 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.028939 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029100 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-log-socket" (OuterVolumeSpecName: "log-socket") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029158 4861 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029211 4861 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-slash\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029229 4861 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029246 4861 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029261 4861 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029277 4861 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029281 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029292 4861 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-node-log\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029330 4861 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029343 4861 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029357 4861 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029373 4861 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029390 4861 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029403 4861 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.029820 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.030047 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.038106 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6ece014-5432-4877-9449-4253d6124c73-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.043699 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6ece014-5432-4877-9449-4253d6124c73-kube-api-access-jpcvl" (OuterVolumeSpecName: "kube-api-access-jpcvl") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "kube-api-access-jpcvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053023 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wcmfx"] Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053398 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovn-acl-logging" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053428 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovn-acl-logging" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053447 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="kubecfg-setup" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053460 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="kubecfg-setup" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053486 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053499 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053516 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovn-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053528 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovn-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053549 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053561 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053574 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="northd" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053586 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="northd" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053603 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="nbdb" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053615 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="nbdb" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053635 4861 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="718f749b-e564-48bf-8df8-8d3070a0fb53" containerName="collect-profiles" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053647 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="718f749b-e564-48bf-8df8-8d3070a0fb53" containerName="collect-profiles" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053666 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053677 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053694 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053707 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053722 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="sbdb" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053733 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="sbdb" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053752 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="kube-rbac-proxy-node" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053763 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="kube-rbac-proxy-node" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.053780 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="kube-rbac-proxy-ovn-metrics" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053792 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="kube-rbac-proxy-ovn-metrics" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053960 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.053982 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="kube-rbac-proxy-node" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.054006 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="nbdb" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.054022 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.054037 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.054050 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 
06:47:16.054067 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="kube-rbac-proxy-ovn-metrics"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.054112 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovn-acl-logging"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.054125 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="sbdb"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.054142 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="northd"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.054158 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovn-controller"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.054177 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="718f749b-e564-48bf-8df8-8d3070a0fb53" containerName="collect-profiles"
Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.054414 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.054430 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.054595 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ece014-5432-4877-9449-4253d6124c73" containerName="ovnkube-controller"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.056680 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "c6ece014-5432-4877-9449-4253d6124c73" (UID: "c6ece014-5432-4877-9449-4253d6124c73"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.058002 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.130578 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7b837071-10c5-4954-ac6c-31bf2b9484ed-ovnkube-script-lib\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.130935 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-node-log\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131013 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-log-socket\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131175 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-systemd-units\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131252 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-run-netns\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131275 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7b837071-10c5-4954-ac6c-31bf2b9484ed-ovnkube-config\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131305 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tg5pr\" (UniqueName: \"kubernetes.io/projected/7b837071-10c5-4954-ac6c-31bf2b9484ed-kube-api-access-tg5pr\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131339 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-var-lib-openvswitch\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131370 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName:
\"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131396 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-etc-openvswitch\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131413 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-run-ovn-kubernetes\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131436 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-cni-bin\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131470 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-run-systemd\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131500 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-run-ovn\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131519 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-run-openvswitch\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131569 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-slash\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131586 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-kubelet\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131609 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7b837071-10c5-4954-ac6c-31bf2b9484ed-env-overrides\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131637 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-cni-netd\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131653 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7b837071-10c5-4954-ac6c-31bf2b9484ed-ovn-node-metrics-cert\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131710 4861 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131721 4861 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131732 4861 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131741 4861 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c6ece014-5432-4877-9449-4253d6124c73-log-socket\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131750 4861 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c6ece014-5432-4877-9449-4253d6124c73-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131760 4861 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c6ece014-5432-4877-9449-4253d6124c73-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.131769 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpcvl\" (UniqueName: \"kubernetes.io/projected/c6ece014-5432-4877-9449-4253d6124c73-kube-api-access-jpcvl\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.233563 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.233794 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234120 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-etc-openvswitch\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234036 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-etc-openvswitch\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234188 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-run-ovn-kubernetes\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234226 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-cni-bin\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234269 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-run-systemd\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234265 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-run-ovn-kubernetes\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234308 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-run-openvswitch\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234347 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-run-systemd\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234352 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-run-ovn\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234393 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-run-openvswitch\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234396 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-run-ovn\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234427 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-slash\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234457 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-kubelet\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234490 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7b837071-10c5-4954-ac6c-31bf2b9484ed-env-overrides\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234524 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-cni-netd\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234534 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-slash\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234352 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-cni-bin\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234557 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7b837071-10c5-4954-ac6c-31bf2b9484ed-ovn-node-metrics-cert\") pod \"ovnkube-node-wcmfx\" (UID: 
\"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234615 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7b837071-10c5-4954-ac6c-31bf2b9484ed-ovnkube-script-lib\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234662 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-node-log\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234695 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-log-socket\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234728 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-systemd-units\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234774 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-run-netns\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234805 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7b837071-10c5-4954-ac6c-31bf2b9484ed-ovnkube-config\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234845 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tg5pr\" (UniqueName: \"kubernetes.io/projected/7b837071-10c5-4954-ac6c-31bf2b9484ed-kube-api-access-tg5pr\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234881 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-var-lib-openvswitch\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.234969 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-var-lib-openvswitch\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.235458 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-node-log\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.235536 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-systemd-units\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.235646 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-log-socket\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.235896 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-cni-netd\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.235913 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-kubelet\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.235946 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7b837071-10c5-4954-ac6c-31bf2b9484ed-host-run-netns\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.236027 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7b837071-10c5-4954-ac6c-31bf2b9484ed-ovnkube-script-lib\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.236456 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7b837071-10c5-4954-ac6c-31bf2b9484ed-env-overrides\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.237234 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7b837071-10c5-4954-ac6c-31bf2b9484ed-ovnkube-config\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.240340 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7b837071-10c5-4954-ac6c-31bf2b9484ed-ovn-node-metrics-cert\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.263479 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tg5pr\" (UniqueName: \"kubernetes.io/projected/7b837071-10c5-4954-ac6c-31bf2b9484ed-kube-api-access-tg5pr\") pod \"ovnkube-node-wcmfx\" (UID: \"7b837071-10c5-4954-ac6c-31bf2b9484ed\") " pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.376212 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.437328 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4942p_da8019d1-2d2c-493d-b80f-1d566eec9475/kube-multus/2.log" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.438489 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4942p_da8019d1-2d2c-493d-b80f-1d566eec9475/kube-multus/1.log" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.438546 4861 generic.go:334] "Generic (PLEG): container finished" podID="da8019d1-2d2c-493d-b80f-1d566eec9475" containerID="22314fbcbe190aa61fb61652edc5f6d76649483f0d312e78cf43fb8b4fa49d7e" exitCode=2 Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.438618 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4942p" event={"ID":"da8019d1-2d2c-493d-b80f-1d566eec9475","Type":"ContainerDied","Data":"22314fbcbe190aa61fb61652edc5f6d76649483f0d312e78cf43fb8b4fa49d7e"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.438666 4861 scope.go:117] "RemoveContainer" containerID="3da3918f3cb52bb47da6fe33239f62ab8ac3186ae1d0f732fab5a461a63463d2" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.439249 4861 scope.go:117] "RemoveContainer" containerID="22314fbcbe190aa61fb61652edc5f6d76649483f0d312e78cf43fb8b4fa49d7e" Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.439605 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-4942p_openshift-multus(da8019d1-2d2c-493d-b80f-1d566eec9475)\"" pod="openshift-multus/multus-4942p" podUID="da8019d1-2d2c-493d-b80f-1d566eec9475" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.444911 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovnkube-controller/3.log" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.447175 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovn-acl-logging/0.log" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.448210 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5xdwl_c6ece014-5432-4877-9449-4253d6124c73/ovn-controller/0.log" Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.448972 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3" exitCode=0 Jan 29 06:47:16 crc 
kubenswrapper[4861]: I0129 06:47:16.449043 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef" exitCode=0
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449063 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2" exitCode=0
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449112 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e" exitCode=0
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449132 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0" exitCode=0
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449147 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a" exitCode=0
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449160 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5" exitCode=143
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449174 4861 generic.go:334] "Generic (PLEG): container finished" podID="c6ece014-5432-4877-9449-4253d6124c73" containerID="8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb" exitCode=143
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449312 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449386 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449556 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449592 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449330 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449619 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449645 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449670 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449688 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449703 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449717 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449730 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449744 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449757 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449770 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449783 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449797 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449816 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"}
Jan 29 06:47:16 crc
kubenswrapper[4861]: I0129 06:47:16.449839 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449855 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449869 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449883 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449896 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449910 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449923 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449937 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449950 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449969 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.449987 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450008 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450023 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"} Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450038 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"} Jan 29 06:47:16 crc 
kubenswrapper[4861]: I0129 06:47:16.450052 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450066 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450116 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450130 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450143 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450156 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450169 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450187 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5xdwl" event={"ID":"c6ece014-5432-4877-9449-4253d6124c73","Type":"ContainerDied","Data":"20d47002536ef79877b65908f4cb24594ef452e5552e6bbe08a1c75aa13aebce"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450208 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450224 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450237 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450250 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450264 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450277 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450290 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450304 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450317 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.450330 4861 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.454664 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" event={"ID":"7b837071-10c5-4954-ac6c-31bf2b9484ed","Type":"ContainerStarted","Data":"39252f04c0be9f79a3991548fb9f07010d6f2d3db653137bc4fa2e1b3f3e12d8"}
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.474449 4861 scope.go:117] "RemoveContainer" containerID="39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.497394 4861 scope.go:117] "RemoveContainer" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.508099 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5xdwl"]
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.520649 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5xdwl"]
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.540882 4861 scope.go:117] "RemoveContainer" containerID="de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.596342 4861 scope.go:117] "RemoveContainer" containerID="2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.613698 4861 scope.go:117] "RemoveContainer" containerID="cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.632238 4861 scope.go:117] "RemoveContainer" containerID="e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.644051 4861 scope.go:117] "RemoveContainer" containerID="4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.662486 4861 scope.go:117] "RemoveContainer" containerID="cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.684772 4861 scope.go:117] "RemoveContainer" containerID="8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.704751 4861 scope.go:117] "RemoveContainer" containerID="4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.730323 4861 scope.go:117] "RemoveContainer" containerID="39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"
Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.731517 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3\": container with ID starting with 39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3 not found: ID does not exist" containerID="39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.731572 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"} err="failed to get container status \"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3\": rpc error: code = NotFound desc = could not find container \"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3\": container with ID starting with 39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.731604 4861 scope.go:117] "RemoveContainer" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"
Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.732307 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\": container with ID starting with 5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a not found: ID does not exist" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.732335 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"} err="failed to get container status \"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\": rpc error: code = NotFound desc = could not find container \"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\": container with ID starting with 5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.732352 4861 scope.go:117] "RemoveContainer" containerID="de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"
Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.732878 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\": container with ID starting with de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef not found: ID does not exist" containerID="de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.732944 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"} err="failed to get container status \"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\": rpc error: code = NotFound desc = could not find container \"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\": container with ID starting with de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.732984 4861 scope.go:117] "RemoveContainer" containerID="2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"
Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.733878 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\": container with ID starting with 2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2 not found: ID does not exist" containerID="2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.733935 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"} err="failed to get container status \"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\": rpc error: code = NotFound desc = could not find container \"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\": container with ID starting with 2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.733968 4861 scope.go:117] "RemoveContainer" containerID="cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"
Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.734603 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\": container with ID starting with cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e not found: ID does not exist" containerID="cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.734637 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"} err="failed to get container status \"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\": rpc error: code = NotFound desc = could not find container \"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\": container with ID starting with cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.734661 4861 scope.go:117] "RemoveContainer" containerID="e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"
Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.735006 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\": container with ID starting with e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0 not found: ID does not exist" containerID="e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.735047 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"} err="failed to get container status \"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\": rpc error: code = NotFound desc = could not find container \"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\": container with ID starting with e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.735096 4861 scope.go:117] "RemoveContainer" containerID="4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"
Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.735475 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\": container with ID starting with 4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a not found: ID does not exist" containerID="4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.735510 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"} err="failed to get container status \"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\": rpc error: code = NotFound desc = could not find container \"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\": container with ID starting with 4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.735529 4861 scope.go:117] "RemoveContainer" containerID="cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"
Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.735904 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\": container with ID starting with cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5 not found: ID does not exist" containerID="cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.735942 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"} err="failed to get container status \"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\": rpc error: code = NotFound desc = could not find container \"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\": container with ID starting with cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.735966 4861 scope.go:117] "RemoveContainer" containerID="8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"
Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.736361 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\": container with ID starting with 8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb not found: ID does not exist" containerID="8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.736393 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"} err="failed to get container status \"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\": rpc error: code = NotFound desc = could not find container \"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\": container with ID starting with 8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.736415 4861 scope.go:117] "RemoveContainer" containerID="4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"
Jan 29 06:47:16 crc kubenswrapper[4861]: E0129 06:47:16.736914 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\": container with ID starting with 4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4 not found: ID does not exist" containerID="4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.736954 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"} err="failed to get container status \"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\": rpc error: code = NotFound desc = could not find container \"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\": container with ID starting with 4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.736982 4861 scope.go:117] "RemoveContainer" containerID="39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.737370 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"} err="failed to get container status \"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3\": rpc error: code = NotFound desc = could not find container \"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3\": container with ID starting with 39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.737419 4861 scope.go:117] "RemoveContainer" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.737748 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"} err="failed to get container status \"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\": rpc error: code = NotFound desc = could not find container \"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\": container with ID starting with 5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.737778 4861 scope.go:117] "RemoveContainer" containerID="de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.738063 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"} err="failed to get container status \"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\": rpc error: code = NotFound desc = could not find container \"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\": container with ID starting with de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.738120 4861 scope.go:117] "RemoveContainer" containerID="2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.738450 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"} err="failed to get container status \"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\": rpc error: code = NotFound desc = could not find container \"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\": container with ID starting with 2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.738494 4861 scope.go:117] "RemoveContainer" containerID="cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.738773 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"} err="failed to get container status \"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\": rpc error: code = NotFound desc = could not find container \"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\": container with ID starting with cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.738805 4861 scope.go:117] "RemoveContainer" containerID="e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.739117 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"} err="failed to get container status \"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\": rpc error: code = NotFound desc = could not find container \"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\": container with ID starting with e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.739166 4861 scope.go:117] "RemoveContainer" containerID="4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.739483 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"} err="failed to get container status \"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\": rpc error: code = NotFound desc = could not find container \"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\": container with ID starting with 4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.739526 4861 scope.go:117] "RemoveContainer" containerID="cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.739849 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"} err="failed to get container status \"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\": rpc error: code = NotFound desc = could not find container \"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\": container with ID starting with cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.739881 4861 scope.go:117] "RemoveContainer" containerID="8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.740189 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"} err="failed to get container status \"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\": rpc error: code = NotFound desc = could not find container \"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\": container with ID starting with 8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.740232 4861 scope.go:117] "RemoveContainer" containerID="4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.740569 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"} err="failed to get container status \"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\": rpc error: code = NotFound desc = could not find container \"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\": container with ID starting with 4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.740595 4861 scope.go:117] "RemoveContainer" containerID="39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.740994 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"} err="failed to get container status \"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3\": rpc error: code = NotFound desc = could not find container \"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3\": container with ID starting with 39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.741056 4861 scope.go:117] "RemoveContainer" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.741414 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"} err="failed to get container status \"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\": rpc error: code = NotFound desc = could not find container \"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\": container with ID starting with 5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.741445 4861 scope.go:117] "RemoveContainer" containerID="de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.741746 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"} err="failed to get container status \"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\": rpc error: code = NotFound desc = could not find container \"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\": container with ID starting with de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.741795 4861 scope.go:117] "RemoveContainer" containerID="2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.742104 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"} err="failed to get container status \"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\": rpc error: code = NotFound desc = could not find container \"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\": container with ID starting with 2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.742131 4861 scope.go:117] "RemoveContainer" containerID="cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.742435 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"} err="failed to get container status \"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\": rpc error: code = NotFound desc = could not find container \"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\": container with ID starting with cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.742477 4861 scope.go:117] "RemoveContainer" containerID="e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.742766 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"} err="failed to get container status \"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\": rpc error: code = NotFound desc = could not find container \"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\": container with ID starting with e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.742814 4861 scope.go:117] "RemoveContainer" containerID="4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.743045 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"} err="failed to get container status \"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\": rpc error: code = NotFound desc = could not find container \"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\": container with ID starting with 4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.743087 4861 scope.go:117] "RemoveContainer" containerID="cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.743390 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"} err="failed to get container status \"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\": rpc error: code = NotFound desc = could not find container \"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\": container with ID starting with cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.743430 4861 scope.go:117] "RemoveContainer" containerID="8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.743851 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"} err="failed to get container status \"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\": rpc error: code = NotFound desc = could not find container \"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\": container with ID starting with 8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.743878 4861 scope.go:117] "RemoveContainer" containerID="4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.744171 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"} err="failed to get container status \"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\": rpc error: code = NotFound desc = could not find container \"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\": container with ID starting with 4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.744226 4861 scope.go:117] "RemoveContainer" containerID="39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.744444 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3"} err="failed to get container status \"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3\": rpc error: code = NotFound desc = could not find container \"39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3\": container with ID starting with 39501d8ba31f28af46a30457c21b6e6b527bc3619bf2ee163fe661ab5ab5d8f3 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.744465 4861 scope.go:117] "RemoveContainer" containerID="5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.744647 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a"} err="failed to get container status \"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\": rpc error: code = NotFound desc = could not find container \"5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a\": container with ID starting with 5f52dd37552ad663a684a6c08934bbd75587aaf7a5d34ea4d2cbeeb415bd3f6a not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.744660 4861 scope.go:117] "RemoveContainer" containerID="de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.744834 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef"} err="failed to get container status \"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\": rpc error: code = NotFound desc = could not find container \"de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef\": container with ID starting with de08b431a002efdce1cdcd513b311321f008d3a0dbab9522f836788cbfb7ceef not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.744861 4861 scope.go:117] "RemoveContainer" containerID="2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.745089 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2"} err="failed to get container status \"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\": rpc error: code = NotFound desc = could not find container \"2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2\": container with ID starting with 2f755a6ee299844b378e455903987b1797c997444dd289034cf43ae19c1d90e2 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.745108 4861 scope.go:117] "RemoveContainer" containerID="cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.745334 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e"} err="failed to get container status \"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\": rpc error: code = NotFound desc = could not find container \"cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e\": container with ID starting with cc65395d15a0b5e7998a7130c9631bd6884714b560a50bbd2697271eb7637b4e not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.745355 4861 scope.go:117] "RemoveContainer" containerID="e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.745629 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0"} err="failed to get container status \"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\": rpc error: code = NotFound desc = could not find container \"e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0\": container with ID starting with e4c82339f5ec1ce4a7fb886e3049d547a70e6e21b2135012fa0dca88e0979ce0 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.745655 4861 scope.go:117] "RemoveContainer" containerID="4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.745922 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a"} err="failed to get container status \"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\": rpc error: code = NotFound desc = could not find container \"4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a\": container with ID starting with 4b8eee044f1c38c9e356e6134a8337ab8974d8a4e23bdb5bc63101d76ef6923a not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.746126 4861 scope.go:117] "RemoveContainer" containerID="cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.746387 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5"} err="failed to get container status \"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\": rpc error: code = NotFound desc = could not find container \"cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5\": container with ID starting with cb9b7a5717111e39876443d1ff31c7238887f23a1f28791ab07d6789d93c9ca5 not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.746416 4861 scope.go:117] "RemoveContainer" containerID="8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.746872 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb"} err="failed to get container status \"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\": rpc error: code = NotFound desc = could not find container \"8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb\": container with ID starting with 8f0b6794582de5137274b3494d3731d656f0dbc6f30e4f738a44a00f3e4627eb not found: ID does not exist"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.746899 4861 scope.go:117] "RemoveContainer" containerID="4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"
Jan 29 06:47:16 crc kubenswrapper[4861]: I0129 06:47:16.747378 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4"} err="failed to get container status \"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\": rpc error: code = NotFound desc = could not find container \"4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4\": container with ID starting with 4aae471416ffb71aa52555203a11e58f211bae01a92c4d0c085667895052e9e4 not found: ID does not exist"
Jan 29 06:47:17 crc kubenswrapper[4861]: I0129 06:47:17.133616 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6ece014-5432-4877-9449-4253d6124c73" path="/var/lib/kubelet/pods/c6ece014-5432-4877-9449-4253d6124c73/volumes"
Jan 29 06:47:17 crc kubenswrapper[4861]: I0129 06:47:17.464033 4861 generic.go:334] "Generic (PLEG): container finished" podID="7b837071-10c5-4954-ac6c-31bf2b9484ed" containerID="57f51631192fb5145b2b90193254b46b9632d937208147c76ce8821192817029" exitCode=0
Jan 29 06:47:17 crc kubenswrapper[4861]: I0129 06:47:17.464140 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" event={"ID":"7b837071-10c5-4954-ac6c-31bf2b9484ed","Type":"ContainerDied","Data":"57f51631192fb5145b2b90193254b46b9632d937208147c76ce8821192817029"}
Jan 29 06:47:17 crc kubenswrapper[4861]: I0129 06:47:17.467162 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4942p_da8019d1-2d2c-493d-b80f-1d566eec9475/kube-multus/2.log"
Jan 29 06:47:18 crc kubenswrapper[4861]: I0129 06:47:18.479295 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" event={"ID":"7b837071-10c5-4954-ac6c-31bf2b9484ed","Type":"ContainerStarted","Data":"5dde3f97e551e09da1d8fce48d4b2bc15f9fa60d9bc48c6fd170524629ce804f"}
Jan 29 06:47:18 crc kubenswrapper[4861]: I0129 06:47:18.479640 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" event={"ID":"7b837071-10c5-4954-ac6c-31bf2b9484ed","Type":"ContainerStarted","Data":"3bff3dba3242bf0926753ef45da3ca1eb4133c64ad45d86f5dbad2883f792e0e"}
Jan 29 06:47:18 crc kubenswrapper[4861]: I0129 06:47:18.479655 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" event={"ID":"7b837071-10c5-4954-ac6c-31bf2b9484ed","Type":"ContainerStarted","Data":"a5c5c5f8dd2d1fee749ab3edf725ac39ba374c357d9b05c975d36879293256a5"}
Jan 29 06:47:18 crc kubenswrapper[4861]: I0129 06:47:18.479670 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" event={"ID":"7b837071-10c5-4954-ac6c-31bf2b9484ed","Type":"ContainerStarted","Data":"64e0ab3c6c0d6c204d3ec00fd0ba2fd96e6090dffe7bd1e99dd1f1c3d257bc05"}
Jan 29 06:47:18 crc kubenswrapper[4861]: I0129 06:47:18.479680 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" event={"ID":"7b837071-10c5-4954-ac6c-31bf2b9484ed","Type":"ContainerStarted","Data":"dbaa73559d80e154caea541af23a28a2cd473c71ced2cef96bff1e24da11cea2"}
Jan 29 06:47:18 crc kubenswrapper[4861]: I0129 06:47:18.479691 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" event={"ID":"7b837071-10c5-4954-ac6c-31bf2b9484ed","Type":"ContainerStarted","Data":"194ee204c638306f92904988545ad6c6b8ed9741df3641e90bcbaed7622b9f3e"}
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.225315 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-8hmnw"]
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.226866 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.230988 4861 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-9zmr6"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.231002 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.231535 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.233587 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.290000 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7rdd\" (UniqueName: \"kubernetes.io/projected/ccc1ed57-3186-442c-8132-0f6a7fcc1526-kube-api-access-j7rdd\") pod \"crc-storage-crc-8hmnw\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.290096 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/ccc1ed57-3186-442c-8132-0f6a7fcc1526-crc-storage\") pod \"crc-storage-crc-8hmnw\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.290127 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/ccc1ed57-3186-442c-8132-0f6a7fcc1526-node-mnt\") pod \"crc-storage-crc-8hmnw\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.391233 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7rdd\" (UniqueName: \"kubernetes.io/projected/ccc1ed57-3186-442c-8132-0f6a7fcc1526-kube-api-access-j7rdd\") pod \"crc-storage-crc-8hmnw\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.391295 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/ccc1ed57-3186-442c-8132-0f6a7fcc1526-crc-storage\") pod \"crc-storage-crc-8hmnw\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.391324 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/ccc1ed57-3186-442c-8132-0f6a7fcc1526-node-mnt\") pod \"crc-storage-crc-8hmnw\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.391638 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/ccc1ed57-3186-442c-8132-0f6a7fcc1526-node-mnt\") pod \"crc-storage-crc-8hmnw\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.392624 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/ccc1ed57-3186-442c-8132-0f6a7fcc1526-crc-storage\") pod \"crc-storage-crc-8hmnw\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.425577 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7rdd\" (UniqueName: \"kubernetes.io/projected/ccc1ed57-3186-442c-8132-0f6a7fcc1526-kube-api-access-j7rdd\") pod \"crc-storage-crc-8hmnw\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: I0129 06:47:20.555212 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: E0129 06:47:20.598174 4861 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8hmnw_crc-storage_ccc1ed57-3186-442c-8132-0f6a7fcc1526_0(a5a6586181eaa71f41f00990ea813c9187e3c2e75489899dc47085a0cc2e6e66): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Jan 29 06:47:20 crc kubenswrapper[4861]: E0129 06:47:20.598256 4861 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8hmnw_crc-storage_ccc1ed57-3186-442c-8132-0f6a7fcc1526_0(a5a6586181eaa71f41f00990ea813c9187e3c2e75489899dc47085a0cc2e6e66): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: E0129 06:47:20.598284 4861 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8hmnw_crc-storage_ccc1ed57-3186-442c-8132-0f6a7fcc1526_0(a5a6586181eaa71f41f00990ea813c9187e3c2e75489899dc47085a0cc2e6e66): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:20 crc kubenswrapper[4861]: E0129 06:47:20.598329 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-8hmnw_crc-storage(ccc1ed57-3186-442c-8132-0f6a7fcc1526)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-8hmnw_crc-storage(ccc1ed57-3186-442c-8132-0f6a7fcc1526)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8hmnw_crc-storage_ccc1ed57-3186-442c-8132-0f6a7fcc1526_0(a5a6586181eaa71f41f00990ea813c9187e3c2e75489899dc47085a0cc2e6e66): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-8hmnw" podUID="ccc1ed57-3186-442c-8132-0f6a7fcc1526"
Jan 29 06:47:21 crc kubenswrapper[4861]: I0129 06:47:21.511178 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" event={"ID":"7b837071-10c5-4954-ac6c-31bf2b9484ed","Type":"ContainerStarted","Data":"e53c1facdc327d3fe81e6b43fe0a3c3be5f643946b5243c8656e23a39acd10ac"}
Jan 29 06:47:23 crc kubenswrapper[4861]: I0129 06:47:23.530196 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" event={"ID":"7b837071-10c5-4954-ac6c-31bf2b9484ed","Type":"ContainerStarted","Data":"e81c21af2a2da3977738d3bbc34f3735911935b026301dc0b618be29519220e3"}
Jan 29 06:47:23 crc kubenswrapper[4861]: I0129 06:47:23.530870 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:23 crc kubenswrapper[4861]: I0129 06:47:23.530887 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:23 crc kubenswrapper[4861]: I0129 06:47:23.530899 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:23 crc kubenswrapper[4861]: I0129 06:47:23.565186 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:23 crc kubenswrapper[4861]: I0129 06:47:23.567705 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx"
Jan 29 06:47:23 crc kubenswrapper[4861]: I0129 06:47:23.569430 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" podStartSLOduration=7.569404081 podStartE2EDuration="7.569404081s" podCreationTimestamp="2026-01-29 06:47:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:47:23.56557511 +0000 UTC m=+735.237069707" watchObservedRunningTime="2026-01-29 06:47:23.569404081 +0000 UTC m=+735.240898658"
Jan 29 06:47:23 crc kubenswrapper[4861]: I0129 06:47:23.840508 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-8hmnw"]
Jan 29 06:47:23 crc kubenswrapper[4861]: I0129 06:47:23.840681 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:23 crc kubenswrapper[4861]: I0129 06:47:23.841332 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8hmnw"
Jan 29 06:47:23 crc kubenswrapper[4861]: E0129 06:47:23.874786 4861 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8hmnw_crc-storage_ccc1ed57-3186-442c-8132-0f6a7fcc1526_0(86b8d9cfb72143179e8364ceac6178172f904dda9fd6a5aad11e007999526101): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Jan 29 06:47:23 crc kubenswrapper[4861]: E0129 06:47:23.874856 4861 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8hmnw_crc-storage_ccc1ed57-3186-442c-8132-0f6a7fcc1526_0(86b8d9cfb72143179e8364ceac6178172f904dda9fd6a5aad11e007999526101): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-8hmnw" Jan 29 06:47:23 crc kubenswrapper[4861]: E0129 06:47:23.874880 4861 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8hmnw_crc-storage_ccc1ed57-3186-442c-8132-0f6a7fcc1526_0(86b8d9cfb72143179e8364ceac6178172f904dda9fd6a5aad11e007999526101): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-8hmnw" Jan 29 06:47:23 crc kubenswrapper[4861]: E0129 06:47:23.874935 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-8hmnw_crc-storage(ccc1ed57-3186-442c-8132-0f6a7fcc1526)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-8hmnw_crc-storage(ccc1ed57-3186-442c-8132-0f6a7fcc1526)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-8hmnw_crc-storage_ccc1ed57-3186-442c-8132-0f6a7fcc1526_0(86b8d9cfb72143179e8364ceac6178172f904dda9fd6a5aad11e007999526101): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-8hmnw" podUID="ccc1ed57-3186-442c-8132-0f6a7fcc1526" Jan 29 06:47:30 crc kubenswrapper[4861]: I0129 06:47:30.630314 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:47:30 crc kubenswrapper[4861]: I0129 06:47:30.631309 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:47:32 crc kubenswrapper[4861]: I0129 06:47:32.117380 4861 scope.go:117] "RemoveContainer" containerID="22314fbcbe190aa61fb61652edc5f6d76649483f0d312e78cf43fb8b4fa49d7e" Jan 29 06:47:32 crc kubenswrapper[4861]: I0129 06:47:32.599895 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4942p_da8019d1-2d2c-493d-b80f-1d566eec9475/kube-multus/2.log" Jan 29 06:47:32 crc kubenswrapper[4861]: I0129 06:47:32.600331 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4942p" event={"ID":"da8019d1-2d2c-493d-b80f-1d566eec9475","Type":"ContainerStarted","Data":"b0127966b655ec0c7c53783253b73cabd9b65979a807464307bab4e68f82114c"} Jan 29 06:47:38 crc kubenswrapper[4861]: I0129 06:47:38.115681 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8hmnw" Jan 29 06:47:38 crc kubenswrapper[4861]: I0129 06:47:38.116980 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-8hmnw" Jan 29 06:47:38 crc kubenswrapper[4861]: I0129 06:47:38.585238 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-8hmnw"] Jan 29 06:47:38 crc kubenswrapper[4861]: I0129 06:47:38.599227 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 06:47:38 crc kubenswrapper[4861]: I0129 06:47:38.642110 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-8hmnw" event={"ID":"ccc1ed57-3186-442c-8132-0f6a7fcc1526","Type":"ContainerStarted","Data":"bad993859193183b65dd0b31ee5976cda890bdbc59da3cbff4a554730d9a9cd6"} Jan 29 06:47:40 crc kubenswrapper[4861]: I0129 06:47:40.655613 4861 generic.go:334] "Generic (PLEG): container finished" podID="ccc1ed57-3186-442c-8132-0f6a7fcc1526" containerID="e2605ba51f074b0a77e2fdc954faacbeadf1b142896145745b9f4e66415a294a" exitCode=0 Jan 29 06:47:40 crc kubenswrapper[4861]: I0129 06:47:40.655669 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-8hmnw" event={"ID":"ccc1ed57-3186-442c-8132-0f6a7fcc1526","Type":"ContainerDied","Data":"e2605ba51f074b0a77e2fdc954faacbeadf1b142896145745b9f4e66415a294a"} Jan 29 06:47:41 crc kubenswrapper[4861]: I0129 06:47:41.987996 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8hmnw" Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.069599 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7rdd\" (UniqueName: \"kubernetes.io/projected/ccc1ed57-3186-442c-8132-0f6a7fcc1526-kube-api-access-j7rdd\") pod \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.069741 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/ccc1ed57-3186-442c-8132-0f6a7fcc1526-node-mnt\") pod \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.069785 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/ccc1ed57-3186-442c-8132-0f6a7fcc1526-crc-storage\") pod \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\" (UID: \"ccc1ed57-3186-442c-8132-0f6a7fcc1526\") " Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.069886 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ccc1ed57-3186-442c-8132-0f6a7fcc1526-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "ccc1ed57-3186-442c-8132-0f6a7fcc1526" (UID: "ccc1ed57-3186-442c-8132-0f6a7fcc1526"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.070307 4861 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/ccc1ed57-3186-442c-8132-0f6a7fcc1526-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.077173 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccc1ed57-3186-442c-8132-0f6a7fcc1526-kube-api-access-j7rdd" (OuterVolumeSpecName: "kube-api-access-j7rdd") pod "ccc1ed57-3186-442c-8132-0f6a7fcc1526" (UID: "ccc1ed57-3186-442c-8132-0f6a7fcc1526"). 
InnerVolumeSpecName "kube-api-access-j7rdd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.093214 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccc1ed57-3186-442c-8132-0f6a7fcc1526-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "ccc1ed57-3186-442c-8132-0f6a7fcc1526" (UID: "ccc1ed57-3186-442c-8132-0f6a7fcc1526"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.171717 4861 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/ccc1ed57-3186-442c-8132-0f6a7fcc1526-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.171791 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7rdd\" (UniqueName: \"kubernetes.io/projected/ccc1ed57-3186-442c-8132-0f6a7fcc1526-kube-api-access-j7rdd\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.671449 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-8hmnw" event={"ID":"ccc1ed57-3186-442c-8132-0f6a7fcc1526","Type":"ContainerDied","Data":"bad993859193183b65dd0b31ee5976cda890bdbc59da3cbff4a554730d9a9cd6"} Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.671498 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bad993859193183b65dd0b31ee5976cda890bdbc59da3cbff4a554730d9a9cd6" Jan 29 06:47:42 crc kubenswrapper[4861]: I0129 06:47:42.671536 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8hmnw" Jan 29 06:47:44 crc kubenswrapper[4861]: I0129 06:47:44.470641 4861 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 29 06:47:46 crc kubenswrapper[4861]: I0129 06:47:46.419894 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wcmfx" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.614700 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6"] Jan 29 06:47:48 crc kubenswrapper[4861]: E0129 06:47:48.615115 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccc1ed57-3186-442c-8132-0f6a7fcc1526" containerName="storage" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.615127 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccc1ed57-3186-442c-8132-0f6a7fcc1526" containerName="storage" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.615227 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccc1ed57-3186-442c-8132-0f6a7fcc1526" containerName="storage" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.615858 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.627201 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.632703 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6"] Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.701197 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.701259 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.701328 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdhdx\" (UniqueName: \"kubernetes.io/projected/9dede211-65a8-4376-b8cb-2f692702b30d-kube-api-access-sdhdx\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.802271 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.802535 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.802665 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdhdx\" (UniqueName: \"kubernetes.io/projected/9dede211-65a8-4376-b8cb-2f692702b30d-kube-api-access-sdhdx\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.803307 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.803329 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.828368 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdhdx\" (UniqueName: \"kubernetes.io/projected/9dede211-65a8-4376-b8cb-2f692702b30d-kube-api-access-sdhdx\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:48 crc kubenswrapper[4861]: I0129 06:47:48.939905 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:49 crc kubenswrapper[4861]: I0129 06:47:49.212725 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6"] Jan 29 06:47:49 crc kubenswrapper[4861]: I0129 06:47:49.711330 4861 generic.go:334] "Generic (PLEG): container finished" podID="9dede211-65a8-4376-b8cb-2f692702b30d" containerID="9258a28e6bd39e2f08c3bb532f99905b2a895397087e68d52b680ad7ef0d66d8" exitCode=0 Jan 29 06:47:49 crc kubenswrapper[4861]: I0129 06:47:49.711397 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" event={"ID":"9dede211-65a8-4376-b8cb-2f692702b30d","Type":"ContainerDied","Data":"9258a28e6bd39e2f08c3bb532f99905b2a895397087e68d52b680ad7ef0d66d8"} Jan 29 06:47:49 crc kubenswrapper[4861]: I0129 06:47:49.711701 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" event={"ID":"9dede211-65a8-4376-b8cb-2f692702b30d","Type":"ContainerStarted","Data":"3a1c1ce1cd66ec777eb3a97d5db0521fd0e00e0ba25e0ead34ae693438b2226a"} Jan 29 06:47:49 crc kubenswrapper[4861]: E0129 06:47:49.740382 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9dede211_65a8_4376_b8cb_2f692702b30d.slice/crio-9258a28e6bd39e2f08c3bb532f99905b2a895397087e68d52b680ad7ef0d66d8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9dede211_65a8_4376_b8cb_2f692702b30d.slice/crio-conmon-9258a28e6bd39e2f08c3bb532f99905b2a895397087e68d52b680ad7ef0d66d8.scope\": RecentStats: unable to find data in memory cache]" Jan 29 06:47:50 crc kubenswrapper[4861]: I0129 06:47:50.856120 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r7zcg"] Jan 29 06:47:50 crc kubenswrapper[4861]: I0129 06:47:50.861569 4861 util.go:30] "No sandbox for pod 
Jan 29 06:47:50 crc kubenswrapper[4861]: I0129 06:47:50.867808 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r7zcg"]
Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.037854 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-catalog-content\") pod \"redhat-operators-r7zcg\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " pod="openshift-marketplace/redhat-operators-r7zcg"
Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.038064 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8m87\" (UniqueName: \"kubernetes.io/projected/ff5f80af-88c2-4679-8da9-614d2a937eaf-kube-api-access-r8m87\") pod \"redhat-operators-r7zcg\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " pod="openshift-marketplace/redhat-operators-r7zcg"
Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.038205 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-utilities\") pod \"redhat-operators-r7zcg\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " pod="openshift-marketplace/redhat-operators-r7zcg"
Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.139160 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-catalog-content\") pod \"redhat-operators-r7zcg\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " pod="openshift-marketplace/redhat-operators-r7zcg"
Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.139288 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8m87\" (UniqueName: \"kubernetes.io/projected/ff5f80af-88c2-4679-8da9-614d2a937eaf-kube-api-access-r8m87\") pod \"redhat-operators-r7zcg\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " pod="openshift-marketplace/redhat-operators-r7zcg"
Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.139346 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-utilities\") pod \"redhat-operators-r7zcg\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " pod="openshift-marketplace/redhat-operators-r7zcg"
Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.139930 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-catalog-content\") pod \"redhat-operators-r7zcg\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " pod="openshift-marketplace/redhat-operators-r7zcg"
Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.139949 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-utilities\") pod \"redhat-operators-r7zcg\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " pod="openshift-marketplace/redhat-operators-r7zcg"
Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.163955 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8m87\" (UniqueName: \"kubernetes.io/projected/ff5f80af-88c2-4679-8da9-614d2a937eaf-kube-api-access-r8m87\") pod \"redhat-operators-r7zcg\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " pod="openshift-marketplace/redhat-operators-r7zcg"
\"kube-api-access-r8m87\" (UniqueName: \"kubernetes.io/projected/ff5f80af-88c2-4679-8da9-614d2a937eaf-kube-api-access-r8m87\") pod \"redhat-operators-r7zcg\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " pod="openshift-marketplace/redhat-operators-r7zcg" Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.220653 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r7zcg" Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.401554 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r7zcg"] Jan 29 06:47:51 crc kubenswrapper[4861]: W0129 06:47:51.407169 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff5f80af_88c2_4679_8da9_614d2a937eaf.slice/crio-1b45e81d4919eab9eb8e56694c1bf133e4b9cf22f6b9a001237695375142eff1 WatchSource:0}: Error finding container 1b45e81d4919eab9eb8e56694c1bf133e4b9cf22f6b9a001237695375142eff1: Status 404 returned error can't find the container with id 1b45e81d4919eab9eb8e56694c1bf133e4b9cf22f6b9a001237695375142eff1 Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.723597 4861 generic.go:334] "Generic (PLEG): container finished" podID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerID="6f4a7dba8ba74c6b5b47fa2c039cf5a92b7340e588a313b22bafeb90e55da628" exitCode=0 Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.723707 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7zcg" event={"ID":"ff5f80af-88c2-4679-8da9-614d2a937eaf","Type":"ContainerDied","Data":"6f4a7dba8ba74c6b5b47fa2c039cf5a92b7340e588a313b22bafeb90e55da628"} Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.724006 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7zcg" event={"ID":"ff5f80af-88c2-4679-8da9-614d2a937eaf","Type":"ContainerStarted","Data":"1b45e81d4919eab9eb8e56694c1bf133e4b9cf22f6b9a001237695375142eff1"} Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.729016 4861 generic.go:334] "Generic (PLEG): container finished" podID="9dede211-65a8-4376-b8cb-2f692702b30d" containerID="d4df5e42e3f6cd3f5b5d1716f83bd7481b8a3a539068b9d2b744c70a15d73283" exitCode=0 Jan 29 06:47:51 crc kubenswrapper[4861]: I0129 06:47:51.729045 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" event={"ID":"9dede211-65a8-4376-b8cb-2f692702b30d","Type":"ContainerDied","Data":"d4df5e42e3f6cd3f5b5d1716f83bd7481b8a3a539068b9d2b744c70a15d73283"} Jan 29 06:47:52 crc kubenswrapper[4861]: I0129 06:47:52.738878 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7zcg" event={"ID":"ff5f80af-88c2-4679-8da9-614d2a937eaf","Type":"ContainerStarted","Data":"ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c"} Jan 29 06:47:52 crc kubenswrapper[4861]: I0129 06:47:52.741941 4861 generic.go:334] "Generic (PLEG): container finished" podID="9dede211-65a8-4376-b8cb-2f692702b30d" containerID="911232d8e49163622230ce0f15f2450ebb0ec391dbd9b1e34f4a3099964e8c33" exitCode=0 Jan 29 06:47:52 crc kubenswrapper[4861]: I0129 06:47:52.741992 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" 
event={"ID":"9dede211-65a8-4376-b8cb-2f692702b30d","Type":"ContainerDied","Data":"911232d8e49163622230ce0f15f2450ebb0ec391dbd9b1e34f4a3099964e8c33"} Jan 29 06:47:53 crc kubenswrapper[4861]: I0129 06:47:53.751842 4861 generic.go:334] "Generic (PLEG): container finished" podID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerID="ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c" exitCode=0 Jan 29 06:47:53 crc kubenswrapper[4861]: I0129 06:47:53.751931 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7zcg" event={"ID":"ff5f80af-88c2-4679-8da9-614d2a937eaf","Type":"ContainerDied","Data":"ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c"} Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.130582 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.286061 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-bundle\") pod \"9dede211-65a8-4376-b8cb-2f692702b30d\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.286548 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-util\") pod \"9dede211-65a8-4376-b8cb-2f692702b30d\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.286693 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdhdx\" (UniqueName: \"kubernetes.io/projected/9dede211-65a8-4376-b8cb-2f692702b30d-kube-api-access-sdhdx\") pod \"9dede211-65a8-4376-b8cb-2f692702b30d\" (UID: \"9dede211-65a8-4376-b8cb-2f692702b30d\") " Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.286854 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-bundle" (OuterVolumeSpecName: "bundle") pod "9dede211-65a8-4376-b8cb-2f692702b30d" (UID: "9dede211-65a8-4376-b8cb-2f692702b30d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.287351 4861 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.295870 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dede211-65a8-4376-b8cb-2f692702b30d-kube-api-access-sdhdx" (OuterVolumeSpecName: "kube-api-access-sdhdx") pod "9dede211-65a8-4376-b8cb-2f692702b30d" (UID: "9dede211-65a8-4376-b8cb-2f692702b30d"). InnerVolumeSpecName "kube-api-access-sdhdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.316318 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-util" (OuterVolumeSpecName: "util") pod "9dede211-65a8-4376-b8cb-2f692702b30d" (UID: "9dede211-65a8-4376-b8cb-2f692702b30d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.389373 4861 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9dede211-65a8-4376-b8cb-2f692702b30d-util\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.389443 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdhdx\" (UniqueName: \"kubernetes.io/projected/9dede211-65a8-4376-b8cb-2f692702b30d-kube-api-access-sdhdx\") on node \"crc\" DevicePath \"\"" Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.762995 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" event={"ID":"9dede211-65a8-4376-b8cb-2f692702b30d","Type":"ContainerDied","Data":"3a1c1ce1cd66ec777eb3a97d5db0521fd0e00e0ba25e0ead34ae693438b2226a"} Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.763047 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6" Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.763130 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a1c1ce1cd66ec777eb3a97d5db0521fd0e00e0ba25e0ead34ae693438b2226a" Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.766551 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7zcg" event={"ID":"ff5f80af-88c2-4679-8da9-614d2a937eaf","Type":"ContainerStarted","Data":"4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6"} Jan 29 06:47:54 crc kubenswrapper[4861]: I0129 06:47:54.799079 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r7zcg" podStartSLOduration=2.355741432 podStartE2EDuration="4.799051819s" podCreationTimestamp="2026-01-29 06:47:50 +0000 UTC" firstStartedPulling="2026-01-29 06:47:51.72500797 +0000 UTC m=+763.396502527" lastFinishedPulling="2026-01-29 06:47:54.168318317 +0000 UTC m=+765.839812914" observedRunningTime="2026-01-29 06:47:54.794396027 +0000 UTC m=+766.465890614" watchObservedRunningTime="2026-01-29 06:47:54.799051819 +0000 UTC m=+766.470546386" Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.165720 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-8cgg9"] Jan 29 06:47:58 crc kubenswrapper[4861]: E0129 06:47:58.166130 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dede211-65a8-4376-b8cb-2f692702b30d" containerName="util" Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.166141 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dede211-65a8-4376-b8cb-2f692702b30d" containerName="util" Jan 29 06:47:58 crc kubenswrapper[4861]: E0129 06:47:58.166154 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dede211-65a8-4376-b8cb-2f692702b30d" containerName="extract" Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.166160 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dede211-65a8-4376-b8cb-2f692702b30d" containerName="extract" Jan 29 06:47:58 crc kubenswrapper[4861]: E0129 06:47:58.166172 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dede211-65a8-4376-b8cb-2f692702b30d" containerName="pull" Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.166186 4861 
Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.166269 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dede211-65a8-4376-b8cb-2f692702b30d" containerName="extract"
Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.166649 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-8cgg9"
Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.168341 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.168423 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-gsq8m"
Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.168682 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.178225 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-8cgg9"]
Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.183649 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls9b6\" (UniqueName: \"kubernetes.io/projected/32bcf48d-749b-4e22-b0a1-7465f42685be-kube-api-access-ls9b6\") pod \"nmstate-operator-646758c888-8cgg9\" (UID: \"32bcf48d-749b-4e22-b0a1-7465f42685be\") " pod="openshift-nmstate/nmstate-operator-646758c888-8cgg9"
Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.284579 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls9b6\" (UniqueName: \"kubernetes.io/projected/32bcf48d-749b-4e22-b0a1-7465f42685be-kube-api-access-ls9b6\") pod \"nmstate-operator-646758c888-8cgg9\" (UID: \"32bcf48d-749b-4e22-b0a1-7465f42685be\") " pod="openshift-nmstate/nmstate-operator-646758c888-8cgg9"
Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.300585 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls9b6\" (UniqueName: \"kubernetes.io/projected/32bcf48d-749b-4e22-b0a1-7465f42685be-kube-api-access-ls9b6\") pod \"nmstate-operator-646758c888-8cgg9\" (UID: \"32bcf48d-749b-4e22-b0a1-7465f42685be\") " pod="openshift-nmstate/nmstate-operator-646758c888-8cgg9"
Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.479775 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-8cgg9"
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-8cgg9" Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.763822 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-8cgg9"] Jan 29 06:47:58 crc kubenswrapper[4861]: W0129 06:47:58.773417 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32bcf48d_749b_4e22_b0a1_7465f42685be.slice/crio-c6e10c8b13118b38687c0b116b68acd78c9e553c67c6c98f3d5cf32c192e6525 WatchSource:0}: Error finding container c6e10c8b13118b38687c0b116b68acd78c9e553c67c6c98f3d5cf32c192e6525: Status 404 returned error can't find the container with id c6e10c8b13118b38687c0b116b68acd78c9e553c67c6c98f3d5cf32c192e6525 Jan 29 06:47:58 crc kubenswrapper[4861]: I0129 06:47:58.792646 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-8cgg9" event={"ID":"32bcf48d-749b-4e22-b0a1-7465f42685be","Type":"ContainerStarted","Data":"c6e10c8b13118b38687c0b116b68acd78c9e553c67c6c98f3d5cf32c192e6525"} Jan 29 06:48:00 crc kubenswrapper[4861]: I0129 06:48:00.630320 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:48:00 crc kubenswrapper[4861]: I0129 06:48:00.630627 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:48:00 crc kubenswrapper[4861]: I0129 06:48:00.630670 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:48:00 crc kubenswrapper[4861]: I0129 06:48:00.631249 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7f4deb5daa5740edb0e7467d9109be2012b4d8eeb7a5057275a40485d2be7713"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 06:48:00 crc kubenswrapper[4861]: I0129 06:48:00.631292 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://7f4deb5daa5740edb0e7467d9109be2012b4d8eeb7a5057275a40485d2be7713" gracePeriod=600 Jan 29 06:48:00 crc kubenswrapper[4861]: I0129 06:48:00.810643 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="7f4deb5daa5740edb0e7467d9109be2012b4d8eeb7a5057275a40485d2be7713" exitCode=0 Jan 29 06:48:00 crc kubenswrapper[4861]: I0129 06:48:00.810731 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"7f4deb5daa5740edb0e7467d9109be2012b4d8eeb7a5057275a40485d2be7713"} Jan 29 06:48:00 crc kubenswrapper[4861]: I0129 
Jan 29 06:48:01 crc kubenswrapper[4861]: I0129 06:48:01.221348 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r7zcg"
Jan 29 06:48:01 crc kubenswrapper[4861]: I0129 06:48:01.221690 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r7zcg"
Jan 29 06:48:01 crc kubenswrapper[4861]: I0129 06:48:01.826059 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-8cgg9" event={"ID":"32bcf48d-749b-4e22-b0a1-7465f42685be","Type":"ContainerStarted","Data":"1e527565a654a36c0901536a57bf5e7e77f9c2880ffdf44a70ef59148e9aee4a"}
Jan 29 06:48:01 crc kubenswrapper[4861]: I0129 06:48:01.831748 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"a3e571fb457ea966d33ae87dfd58f64d47243c7a436da1c6aa743ed114c9efd5"}
Jan 29 06:48:01 crc kubenswrapper[4861]: I0129 06:48:01.850194 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-8cgg9" podStartSLOduration=1.833902524 podStartE2EDuration="3.85017599s" podCreationTimestamp="2026-01-29 06:47:58 +0000 UTC" firstStartedPulling="2026-01-29 06:47:58.776169392 +0000 UTC m=+770.447663949" lastFinishedPulling="2026-01-29 06:48:00.792442858 +0000 UTC m=+772.463937415" observedRunningTime="2026-01-29 06:48:01.849312957 +0000 UTC m=+773.520807564" watchObservedRunningTime="2026-01-29 06:48:01.85017599 +0000 UTC m=+773.521670577"
Jan 29 06:48:02 crc kubenswrapper[4861]: I0129 06:48:02.267485 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-r7zcg" podUID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerName="registry-server" probeResult="failure" output=<
Jan 29 06:48:02 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s
Jan 29 06:48:02 crc kubenswrapper[4861]: >
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.567244 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-dskl8"]
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.568303 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-dskl8"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.570725 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-qqg9p"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.588978 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-dskl8"]
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.589026 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6"]
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.589676 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6"
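Annotation: the multi-line probe output at 06:48:02 ("timeout: failed to connect service \":50051\" within 1s") matches the message format of the grpc_health_probe CLI, which suggests the registry-server's startup probe is an exec probe against the catalog's gRPC port; the container just needs more time to load its catalog, and the probe flips to "started" at 06:48:11 at the end of this section. A rough Go equivalent of the connection step only (the health RPC itself is omitted; the 127.0.0.1 host is an assumption, the log shows only ":50051"):

```go
// Sketch: TCP-level stand-in for the startup probe's connection attempt.
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.DialTimeout("tcp", "127.0.0.1:50051", 1*time.Second)
	if err != nil {
		fmt.Printf("timeout: failed to connect service %q within 1s\n", ":50051")
		return
	}
	conn.Close()
	fmt.Println("service is reachable")
}
```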
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.598661 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-ttsz5"] Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.599461 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-ttsz5" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.614445 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.622753 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tjfp\" (UniqueName: \"kubernetes.io/projected/d9d5a91c-c0a2-4472-a844-482db18355ae-kube-api-access-8tjfp\") pod \"nmstate-metrics-54757c584b-dskl8\" (UID: \"d9d5a91c-c0a2-4472-a844-482db18355ae\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-dskl8" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.622814 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/219139a6-7711-4a83-a2a2-c8901e8195b6-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-9cnf6\" (UID: \"219139a6-7711-4a83-a2a2-c8901e8195b6\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.622833 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/059322b0-c68b-4446-ae78-5159dfd3606d-nmstate-lock\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.623018 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9dc9\" (UniqueName: \"kubernetes.io/projected/219139a6-7711-4a83-a2a2-c8901e8195b6-kube-api-access-x9dc9\") pod \"nmstate-webhook-8474b5b9d8-9cnf6\" (UID: \"219139a6-7711-4a83-a2a2-c8901e8195b6\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.623119 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/059322b0-c68b-4446-ae78-5159dfd3606d-dbus-socket\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.623335 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwbgr\" (UniqueName: \"kubernetes.io/projected/059322b0-c68b-4446-ae78-5159dfd3606d-kube-api-access-cwbgr\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.623436 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/059322b0-c68b-4446-ae78-5159dfd3606d-ovs-socket\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.725420 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/059322b0-c68b-4446-ae78-5159dfd3606d-nmstate-lock\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.725507 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9dc9\" (UniqueName: \"kubernetes.io/projected/219139a6-7711-4a83-a2a2-c8901e8195b6-kube-api-access-x9dc9\") pod \"nmstate-webhook-8474b5b9d8-9cnf6\" (UID: \"219139a6-7711-4a83-a2a2-c8901e8195b6\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.725547 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/059322b0-c68b-4446-ae78-5159dfd3606d-dbus-socket\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.725581 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwbgr\" (UniqueName: \"kubernetes.io/projected/059322b0-c68b-4446-ae78-5159dfd3606d-kube-api-access-cwbgr\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.725602 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/059322b0-c68b-4446-ae78-5159dfd3606d-ovs-socket\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.725644 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tjfp\" (UniqueName: \"kubernetes.io/projected/d9d5a91c-c0a2-4472-a844-482db18355ae-kube-api-access-8tjfp\") pod \"nmstate-metrics-54757c584b-dskl8\" (UID: \"d9d5a91c-c0a2-4472-a844-482db18355ae\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-dskl8"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.725670 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/219139a6-7711-4a83-a2a2-c8901e8195b6-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-9cnf6\" (UID: \"219139a6-7711-4a83-a2a2-c8901e8195b6\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.726912 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/059322b0-c68b-4446-ae78-5159dfd3606d-ovs-socket\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.727333 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/059322b0-c68b-4446-ae78-5159dfd3606d-nmstate-lock\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5"
\"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.727392 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/059322b0-c68b-4446-ae78-5159dfd3606d-dbus-socket\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.730752 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z"] Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.731798 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.740025 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.740304 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-l69vh" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.741050 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.748138 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/219139a6-7711-4a83-a2a2-c8901e8195b6-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-9cnf6\" (UID: \"219139a6-7711-4a83-a2a2-c8901e8195b6\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.753302 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tjfp\" (UniqueName: \"kubernetes.io/projected/d9d5a91c-c0a2-4472-a844-482db18355ae-kube-api-access-8tjfp\") pod \"nmstate-metrics-54757c584b-dskl8\" (UID: \"d9d5a91c-c0a2-4472-a844-482db18355ae\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-dskl8" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.754058 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9dc9\" (UniqueName: \"kubernetes.io/projected/219139a6-7711-4a83-a2a2-c8901e8195b6-kube-api-access-x9dc9\") pod \"nmstate-webhook-8474b5b9d8-9cnf6\" (UID: \"219139a6-7711-4a83-a2a2-c8901e8195b6\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.756028 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwbgr\" (UniqueName: \"kubernetes.io/projected/059322b0-c68b-4446-ae78-5159dfd3606d-kube-api-access-cwbgr\") pod \"nmstate-handler-ttsz5\" (UID: \"059322b0-c68b-4446-ae78-5159dfd3606d\") " pod="openshift-nmstate/nmstate-handler-ttsz5" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.773559 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z"] Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.827911 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4934a549-9f85-465d-88cd-ea90dafe35d8-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-xgs7z\" (UID: \"4934a549-9f85-465d-88cd-ea90dafe35d8\") " 
pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.827958 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/4934a549-9f85-465d-88cd-ea90dafe35d8-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-xgs7z\" (UID: \"4934a549-9f85-465d-88cd-ea90dafe35d8\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.828122 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q482n\" (UniqueName: \"kubernetes.io/projected/4934a549-9f85-465d-88cd-ea90dafe35d8-kube-api-access-q482n\") pod \"nmstate-console-plugin-7754f76f8b-xgs7z\" (UID: \"4934a549-9f85-465d-88cd-ea90dafe35d8\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.904302 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-85b6884698-hn9b2"] Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.905146 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.918498 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-85b6884698-hn9b2"] Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.927799 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-dskl8" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.928783 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8835a637-f36a-455e-ba99-d893dbf4ba1f-console-oauth-config\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.928826 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmmhh\" (UniqueName: \"kubernetes.io/projected/8835a637-f36a-455e-ba99-d893dbf4ba1f-kube-api-access-xmmhh\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.928849 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8835a637-f36a-455e-ba99-d893dbf4ba1f-console-serving-cert\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.928947 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4934a549-9f85-465d-88cd-ea90dafe35d8-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-xgs7z\" (UID: \"4934a549-9f85-465d-88cd-ea90dafe35d8\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.928976 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-service-ca\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.928998 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/4934a549-9f85-465d-88cd-ea90dafe35d8-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-xgs7z\" (UID: \"4934a549-9f85-465d-88cd-ea90dafe35d8\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.929020 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-oauth-serving-cert\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.929049 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-console-config\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:08 crc kubenswrapper[4861]: E0129 06:48:08.929132 4861 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Jan 29 06:48:08 crc kubenswrapper[4861]: E0129 06:48:08.929256 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4934a549-9f85-465d-88cd-ea90dafe35d8-plugin-serving-cert podName:4934a549-9f85-465d-88cd-ea90dafe35d8 nodeName:}" failed. No retries permitted until 2026-01-29 06:48:09.429238411 +0000 UTC m=+781.100732968 (durationBeforeRetry 500ms). 
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.929301 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-trusted-ca-bundle\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.929365 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q482n\" (UniqueName: \"kubernetes.io/projected/4934a549-9f85-465d-88cd-ea90dafe35d8-kube-api-access-q482n\") pod \"nmstate-console-plugin-7754f76f8b-xgs7z\" (UID: \"4934a549-9f85-465d-88cd-ea90dafe35d8\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.930009 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/4934a549-9f85-465d-88cd-ea90dafe35d8-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-xgs7z\" (UID: \"4934a549-9f85-465d-88cd-ea90dafe35d8\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.949024 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q482n\" (UniqueName: \"kubernetes.io/projected/4934a549-9f85-465d-88cd-ea90dafe35d8-kube-api-access-q482n\") pod \"nmstate-console-plugin-7754f76f8b-xgs7z\" (UID: \"4934a549-9f85-465d-88cd-ea90dafe35d8\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.954275 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6"
Jan 29 06:48:08 crc kubenswrapper[4861]: I0129 06:48:08.962662 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-ttsz5"
Need to start a new one" pod="openshift-nmstate/nmstate-handler-ttsz5" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.030912 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8835a637-f36a-455e-ba99-d893dbf4ba1f-console-serving-cert\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.030996 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-service-ca\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.031031 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-oauth-serving-cert\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.031057 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-console-config\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.031131 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-trusted-ca-bundle\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.031173 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8835a637-f36a-455e-ba99-d893dbf4ba1f-console-oauth-config\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.031198 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmmhh\" (UniqueName: \"kubernetes.io/projected/8835a637-f36a-455e-ba99-d893dbf4ba1f-kube-api-access-xmmhh\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.032338 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-oauth-serving-cert\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.032340 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-console-config\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " 
pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.032990 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-service-ca\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.033247 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8835a637-f36a-455e-ba99-d893dbf4ba1f-trusted-ca-bundle\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.038575 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8835a637-f36a-455e-ba99-d893dbf4ba1f-console-serving-cert\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.038903 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8835a637-f36a-455e-ba99-d893dbf4ba1f-console-oauth-config\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.047301 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmmhh\" (UniqueName: \"kubernetes.io/projected/8835a637-f36a-455e-ba99-d893dbf4ba1f-kube-api-access-xmmhh\") pod \"console-85b6884698-hn9b2\" (UID: \"8835a637-f36a-455e-ba99-d893dbf4ba1f\") " pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.144850 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-dskl8"] Jan 29 06:48:09 crc kubenswrapper[4861]: W0129 06:48:09.153247 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd9d5a91c_c0a2_4472_a844_482db18355ae.slice/crio-d27d5cee9344a5b4577ab59b3fd8086387a257e6168ed2e809f45449ae092aec WatchSource:0}: Error finding container d27d5cee9344a5b4577ab59b3fd8086387a257e6168ed2e809f45449ae092aec: Status 404 returned error can't find the container with id d27d5cee9344a5b4577ab59b3fd8086387a257e6168ed2e809f45449ae092aec Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.184096 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6"] Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.220558 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.444395 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4934a549-9f85-465d-88cd-ea90dafe35d8-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-xgs7z\" (UID: \"4934a549-9f85-465d-88cd-ea90dafe35d8\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.448324 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/4934a549-9f85-465d-88cd-ea90dafe35d8-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-xgs7z\" (UID: \"4934a549-9f85-465d-88cd-ea90dafe35d8\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.542499 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-85b6884698-hn9b2"] Jan 29 06:48:09 crc kubenswrapper[4861]: W0129 06:48:09.550049 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8835a637_f36a_455e_ba99_d893dbf4ba1f.slice/crio-54d2fef6bdc3f762d88125c622f321ee39e6881b75f828238cb15cc8d452e963 WatchSource:0}: Error finding container 54d2fef6bdc3f762d88125c622f321ee39e6881b75f828238cb15cc8d452e963: Status 404 returned error can't find the container with id 54d2fef6bdc3f762d88125c622f321ee39e6881b75f828238cb15cc8d452e963 Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.693247 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-l69vh" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.702399 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.884006 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6" event={"ID":"219139a6-7711-4a83-a2a2-c8901e8195b6","Type":"ContainerStarted","Data":"ab7de716c51d225ba1c3462ddba71d8c5d9d90d990c7d1cc5b0a6bb7f429c6db"} Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.885882 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-ttsz5" event={"ID":"059322b0-c68b-4446-ae78-5159dfd3606d","Type":"ContainerStarted","Data":"14df3ed55432b31747f474ac817ed7208f08960e5df20d034bc12c844248fe15"} Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.890027 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-85b6884698-hn9b2" event={"ID":"8835a637-f36a-455e-ba99-d893dbf4ba1f","Type":"ContainerStarted","Data":"54d2fef6bdc3f762d88125c622f321ee39e6881b75f828238cb15cc8d452e963"} Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.892473 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-dskl8" event={"ID":"d9d5a91c-c0a2-4472-a844-482db18355ae","Type":"ContainerStarted","Data":"d27d5cee9344a5b4577ab59b3fd8086387a257e6168ed2e809f45449ae092aec"} Jan 29 06:48:09 crc kubenswrapper[4861]: W0129 06:48:09.939974 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4934a549_9f85_465d_88cd_ea90dafe35d8.slice/crio-76c6061dcb9788bee798bc896eac39d84b83dae42d5e32498a5fbf12bff659f8 WatchSource:0}: Error finding container 76c6061dcb9788bee798bc896eac39d84b83dae42d5e32498a5fbf12bff659f8: Status 404 returned error can't find the container with id 76c6061dcb9788bee798bc896eac39d84b83dae42d5e32498a5fbf12bff659f8 Jan 29 06:48:09 crc kubenswrapper[4861]: I0129 06:48:09.949809 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z"] Jan 29 06:48:10 crc kubenswrapper[4861]: I0129 06:48:10.902005 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" event={"ID":"4934a549-9f85-465d-88cd-ea90dafe35d8","Type":"ContainerStarted","Data":"76c6061dcb9788bee798bc896eac39d84b83dae42d5e32498a5fbf12bff659f8"} Jan 29 06:48:10 crc kubenswrapper[4861]: I0129 06:48:10.904369 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-85b6884698-hn9b2" event={"ID":"8835a637-f36a-455e-ba99-d893dbf4ba1f","Type":"ContainerStarted","Data":"2295d05107417ae8ca2a48365392f419fd3d53ab314e39ccf316407c4b111f04"} Jan 29 06:48:10 crc kubenswrapper[4861]: I0129 06:48:10.932342 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-85b6884698-hn9b2" podStartSLOduration=2.9323104 podStartE2EDuration="2.9323104s" podCreationTimestamp="2026-01-29 06:48:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:48:10.923781465 +0000 UTC m=+782.595276042" watchObservedRunningTime="2026-01-29 06:48:10.9323104 +0000 UTC m=+782.603804957" Jan 29 06:48:11 crc kubenswrapper[4861]: I0129 06:48:11.271909 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r7zcg" Jan 29 06:48:11 crc kubenswrapper[4861]: I0129 
06:48:11.314230 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r7zcg" Jan 29 06:48:11 crc kubenswrapper[4861]: I0129 06:48:11.497204 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r7zcg"] Jan 29 06:48:12 crc kubenswrapper[4861]: I0129 06:48:12.923337 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r7zcg" podUID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerName="registry-server" containerID="cri-o://4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6" gracePeriod=2 Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.287917 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r7zcg" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.396199 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-utilities\") pod \"ff5f80af-88c2-4679-8da9-614d2a937eaf\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.396477 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-catalog-content\") pod \"ff5f80af-88c2-4679-8da9-614d2a937eaf\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.396769 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8m87\" (UniqueName: \"kubernetes.io/projected/ff5f80af-88c2-4679-8da9-614d2a937eaf-kube-api-access-r8m87\") pod \"ff5f80af-88c2-4679-8da9-614d2a937eaf\" (UID: \"ff5f80af-88c2-4679-8da9-614d2a937eaf\") " Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.405409 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-utilities" (OuterVolumeSpecName: "utilities") pod "ff5f80af-88c2-4679-8da9-614d2a937eaf" (UID: "ff5f80af-88c2-4679-8da9-614d2a937eaf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.412702 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff5f80af-88c2-4679-8da9-614d2a937eaf-kube-api-access-r8m87" (OuterVolumeSpecName: "kube-api-access-r8m87") pod "ff5f80af-88c2-4679-8da9-614d2a937eaf" (UID: "ff5f80af-88c2-4679-8da9-614d2a937eaf"). InnerVolumeSpecName "kube-api-access-r8m87". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.499098 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.499132 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8m87\" (UniqueName: \"kubernetes.io/projected/ff5f80af-88c2-4679-8da9-614d2a937eaf-kube-api-access-r8m87\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.538146 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ff5f80af-88c2-4679-8da9-614d2a937eaf" (UID: "ff5f80af-88c2-4679-8da9-614d2a937eaf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.600142 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff5f80af-88c2-4679-8da9-614d2a937eaf-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.934668 4861 generic.go:334] "Generic (PLEG): container finished" podID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerID="4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6" exitCode=0 Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.934791 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r7zcg" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.934778 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7zcg" event={"ID":"ff5f80af-88c2-4679-8da9-614d2a937eaf","Type":"ContainerDied","Data":"4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6"} Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.935477 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7zcg" event={"ID":"ff5f80af-88c2-4679-8da9-614d2a937eaf","Type":"ContainerDied","Data":"1b45e81d4919eab9eb8e56694c1bf133e4b9cf22f6b9a001237695375142eff1"} Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.935524 4861 scope.go:117] "RemoveContainer" containerID="4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.940204 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6" event={"ID":"219139a6-7711-4a83-a2a2-c8901e8195b6","Type":"ContainerStarted","Data":"aaca8a76b37217e5365ec08bbeee3ffc2de9bc7448e9fd5e07daf0621094dbd8"} Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.940402 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.942475 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-ttsz5" event={"ID":"059322b0-c68b-4446-ae78-5159dfd3606d","Type":"ContainerStarted","Data":"9c3da1378ceeea8c74e1c189870628c1a1b4005002517b3f4c8c23a0071b8787"} Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.942619 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-nmstate/nmstate-handler-ttsz5" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.945781 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" event={"ID":"4934a549-9f85-465d-88cd-ea90dafe35d8","Type":"ContainerStarted","Data":"50e9afff4cd510e1b8917bc3547ce327465362f9ee3522edab56d8a3e5217ad7"} Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.947433 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-dskl8" event={"ID":"d9d5a91c-c0a2-4472-a844-482db18355ae","Type":"ContainerStarted","Data":"57de9af0c6284f6a2862a31a7bd3d169590bcd8281b8e8521fc1d30c4445e312"} Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.961296 4861 scope.go:117] "RemoveContainer" containerID="ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.979175 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6" podStartSLOduration=2.334476686 podStartE2EDuration="5.979143222s" podCreationTimestamp="2026-01-29 06:48:08 +0000 UTC" firstStartedPulling="2026-01-29 06:48:09.192962926 +0000 UTC m=+780.864457483" lastFinishedPulling="2026-01-29 06:48:12.837629462 +0000 UTC m=+784.509124019" observedRunningTime="2026-01-29 06:48:13.971133331 +0000 UTC m=+785.642627988" watchObservedRunningTime="2026-01-29 06:48:13.979143222 +0000 UTC m=+785.650637819" Jan 29 06:48:13 crc kubenswrapper[4861]: I0129 06:48:13.998300 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-ttsz5" podStartSLOduration=2.158087274 podStartE2EDuration="5.998278317s" podCreationTimestamp="2026-01-29 06:48:08 +0000 UTC" firstStartedPulling="2026-01-29 06:48:08.998942239 +0000 UTC m=+780.670436796" lastFinishedPulling="2026-01-29 06:48:12.839133262 +0000 UTC m=+784.510627839" observedRunningTime="2026-01-29 06:48:13.99046046 +0000 UTC m=+785.661955037" watchObservedRunningTime="2026-01-29 06:48:13.998278317 +0000 UTC m=+785.669772894" Jan 29 06:48:14 crc kubenswrapper[4861]: I0129 06:48:14.012473 4861 scope.go:117] "RemoveContainer" containerID="6f4a7dba8ba74c6b5b47fa2c039cf5a92b7340e588a313b22bafeb90e55da628" Jan 29 06:48:14 crc kubenswrapper[4861]: I0129 06:48:14.026158 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r7zcg"] Jan 29 06:48:14 crc kubenswrapper[4861]: I0129 06:48:14.034332 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r7zcg"] Jan 29 06:48:14 crc kubenswrapper[4861]: I0129 06:48:14.053884 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-xgs7z" podStartSLOduration=3.157788705 podStartE2EDuration="6.053857222s" podCreationTimestamp="2026-01-29 06:48:08 +0000 UTC" firstStartedPulling="2026-01-29 06:48:09.942649684 +0000 UTC m=+781.614144261" lastFinishedPulling="2026-01-29 06:48:12.838718221 +0000 UTC m=+784.510212778" observedRunningTime="2026-01-29 06:48:14.042876853 +0000 UTC m=+785.714371460" watchObservedRunningTime="2026-01-29 06:48:14.053857222 +0000 UTC m=+785.725351779" Jan 29 06:48:14 crc kubenswrapper[4861]: I0129 06:48:14.056379 4861 scope.go:117] "RemoveContainer" containerID="4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6" Jan 29 06:48:14 crc kubenswrapper[4861]: E0129 06:48:14.063429 4861 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6\": container with ID starting with 4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6 not found: ID does not exist" containerID="4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6" Jan 29 06:48:14 crc kubenswrapper[4861]: I0129 06:48:14.063482 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6"} err="failed to get container status \"4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6\": rpc error: code = NotFound desc = could not find container \"4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6\": container with ID starting with 4d8f2d607194e686045344029f3f9e102769865e2e32909cb3fc3267086038b6 not found: ID does not exist" Jan 29 06:48:14 crc kubenswrapper[4861]: I0129 06:48:14.063511 4861 scope.go:117] "RemoveContainer" containerID="ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c" Jan 29 06:48:14 crc kubenswrapper[4861]: E0129 06:48:14.064307 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c\": container with ID starting with ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c not found: ID does not exist" containerID="ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c" Jan 29 06:48:14 crc kubenswrapper[4861]: I0129 06:48:14.064336 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c"} err="failed to get container status \"ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c\": rpc error: code = NotFound desc = could not find container \"ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c\": container with ID starting with ebe2867792e079767dfd85cf41c631f6fe38c1b38e74db252e43acf421de896c not found: ID does not exist" Jan 29 06:48:14 crc kubenswrapper[4861]: I0129 06:48:14.064350 4861 scope.go:117] "RemoveContainer" containerID="6f4a7dba8ba74c6b5b47fa2c039cf5a92b7340e588a313b22bafeb90e55da628" Jan 29 06:48:14 crc kubenswrapper[4861]: E0129 06:48:14.065752 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f4a7dba8ba74c6b5b47fa2c039cf5a92b7340e588a313b22bafeb90e55da628\": container with ID starting with 6f4a7dba8ba74c6b5b47fa2c039cf5a92b7340e588a313b22bafeb90e55da628 not found: ID does not exist" containerID="6f4a7dba8ba74c6b5b47fa2c039cf5a92b7340e588a313b22bafeb90e55da628" Jan 29 06:48:14 crc kubenswrapper[4861]: I0129 06:48:14.065798 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f4a7dba8ba74c6b5b47fa2c039cf5a92b7340e588a313b22bafeb90e55da628"} err="failed to get container status \"6f4a7dba8ba74c6b5b47fa2c039cf5a92b7340e588a313b22bafeb90e55da628\": rpc error: code = NotFound desc = could not find container \"6f4a7dba8ba74c6b5b47fa2c039cf5a92b7340e588a313b22bafeb90e55da628\": container with ID starting with 6f4a7dba8ba74c6b5b47fa2c039cf5a92b7340e588a313b22bafeb90e55da628 not found: ID does not exist" Jan 29 06:48:15 crc kubenswrapper[4861]: I0129 06:48:15.124594 4861 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff5f80af-88c2-4679-8da9-614d2a937eaf" path="/var/lib/kubelet/pods/ff5f80af-88c2-4679-8da9-614d2a937eaf/volumes" Jan 29 06:48:15 crc kubenswrapper[4861]: I0129 06:48:15.965581 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-dskl8" event={"ID":"d9d5a91c-c0a2-4472-a844-482db18355ae","Type":"ContainerStarted","Data":"bbfb02991c33158fee846c88c9bfc38a03275cc429957a96ec4096f69fb0baa6"} Jan 29 06:48:15 crc kubenswrapper[4861]: I0129 06:48:15.995261 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-dskl8" podStartSLOduration=2.098897865 podStartE2EDuration="7.995225135s" podCreationTimestamp="2026-01-29 06:48:08 +0000 UTC" firstStartedPulling="2026-01-29 06:48:09.158170848 +0000 UTC m=+780.829665405" lastFinishedPulling="2026-01-29 06:48:15.054498078 +0000 UTC m=+786.725992675" observedRunningTime="2026-01-29 06:48:15.99126924 +0000 UTC m=+787.662763837" watchObservedRunningTime="2026-01-29 06:48:15.995225135 +0000 UTC m=+787.666719742" Jan 29 06:48:18 crc kubenswrapper[4861]: I0129 06:48:18.988867 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-ttsz5" Jan 29 06:48:19 crc kubenswrapper[4861]: I0129 06:48:19.220821 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:19 crc kubenswrapper[4861]: I0129 06:48:19.221357 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:19 crc kubenswrapper[4861]: I0129 06:48:19.241252 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:19 crc kubenswrapper[4861]: I0129 06:48:19.999211 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-85b6884698-hn9b2" Jan 29 06:48:20 crc kubenswrapper[4861]: I0129 06:48:20.070920 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-hrbzh"] Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.134999 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5mx87"] Jan 29 06:48:28 crc kubenswrapper[4861]: E0129 06:48:28.136113 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerName="extract-utilities" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.136130 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerName="extract-utilities" Jan 29 06:48:28 crc kubenswrapper[4861]: E0129 06:48:28.136157 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerName="registry-server" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.136167 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerName="registry-server" Jan 29 06:48:28 crc kubenswrapper[4861]: E0129 06:48:28.136179 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerName="extract-content" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.136187 4861 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerName="extract-content" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.136301 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff5f80af-88c2-4679-8da9-614d2a937eaf" containerName="registry-server" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.138908 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.148847 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5mx87"] Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.229806 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-catalog-content\") pod \"redhat-marketplace-5mx87\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.229933 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njtld\" (UniqueName: \"kubernetes.io/projected/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-kube-api-access-njtld\") pod \"redhat-marketplace-5mx87\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.229960 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-utilities\") pod \"redhat-marketplace-5mx87\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.330746 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njtld\" (UniqueName: \"kubernetes.io/projected/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-kube-api-access-njtld\") pod \"redhat-marketplace-5mx87\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.330847 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-utilities\") pod \"redhat-marketplace-5mx87\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.330931 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-catalog-content\") pod \"redhat-marketplace-5mx87\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.331671 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-utilities\") pod \"redhat-marketplace-5mx87\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.331726 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-catalog-content\") pod \"redhat-marketplace-5mx87\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.363066 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njtld\" (UniqueName: \"kubernetes.io/projected/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-kube-api-access-njtld\") pod \"redhat-marketplace-5mx87\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.467315 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.742034 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5mx87"] Jan 29 06:48:28 crc kubenswrapper[4861]: I0129 06:48:28.960645 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-9cnf6" Jan 29 06:48:29 crc kubenswrapper[4861]: I0129 06:48:29.058508 4861 generic.go:334] "Generic (PLEG): container finished" podID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" containerID="bf829649ede4976f553d8c0806f90a22649e88b73619cbe8d75c59cd53a3f61a" exitCode=0 Jan 29 06:48:29 crc kubenswrapper[4861]: I0129 06:48:29.058568 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mx87" event={"ID":"3d9d2a52-9bed-4e2c-a553-5069cbc5be00","Type":"ContainerDied","Data":"bf829649ede4976f553d8c0806f90a22649e88b73619cbe8d75c59cd53a3f61a"} Jan 29 06:48:29 crc kubenswrapper[4861]: I0129 06:48:29.058604 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mx87" event={"ID":"3d9d2a52-9bed-4e2c-a553-5069cbc5be00","Type":"ContainerStarted","Data":"d5ba503eac780c6f49dba66c477d28860ce9727bd6babd44f6ecb4c7c314f936"} Jan 29 06:48:30 crc kubenswrapper[4861]: I0129 06:48:30.070964 4861 generic.go:334] "Generic (PLEG): container finished" podID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" containerID="66947fb4488f358bcb0aa72e1ac6d54e83719de682534110431003dda7dd32b6" exitCode=0 Jan 29 06:48:30 crc kubenswrapper[4861]: I0129 06:48:30.071027 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mx87" event={"ID":"3d9d2a52-9bed-4e2c-a553-5069cbc5be00","Type":"ContainerDied","Data":"66947fb4488f358bcb0aa72e1ac6d54e83719de682534110431003dda7dd32b6"} Jan 29 06:48:31 crc kubenswrapper[4861]: I0129 06:48:31.082632 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mx87" event={"ID":"3d9d2a52-9bed-4e2c-a553-5069cbc5be00","Type":"ContainerStarted","Data":"009a69332fa991899069ae864e802fa3d67246d29f81856365ced3f9651914e2"} Jan 29 06:48:38 crc kubenswrapper[4861]: I0129 06:48:38.468908 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:38 crc kubenswrapper[4861]: I0129 06:48:38.469730 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:38 crc kubenswrapper[4861]: I0129 06:48:38.550904 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:38 crc kubenswrapper[4861]: I0129 06:48:38.580723 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5mx87" podStartSLOduration=9.152969203 podStartE2EDuration="10.580706871s" podCreationTimestamp="2026-01-29 06:48:28 +0000 UTC" firstStartedPulling="2026-01-29 06:48:29.060855651 +0000 UTC m=+800.732350208" lastFinishedPulling="2026-01-29 06:48:30.488593309 +0000 UTC m=+802.160087876" observedRunningTime="2026-01-29 06:48:31.105323082 +0000 UTC m=+802.776817649" watchObservedRunningTime="2026-01-29 06:48:38.580706871 +0000 UTC m=+810.252201468" Jan 29 06:48:39 crc kubenswrapper[4861]: I0129 06:48:39.195502 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:39 crc kubenswrapper[4861]: I0129 06:48:39.244900 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5mx87"] Jan 29 06:48:41 crc kubenswrapper[4861]: I0129 06:48:41.151328 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5mx87" podUID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" containerName="registry-server" containerID="cri-o://009a69332fa991899069ae864e802fa3d67246d29f81856365ced3f9651914e2" gracePeriod=2 Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.159482 4861 generic.go:334] "Generic (PLEG): container finished" podID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" containerID="009a69332fa991899069ae864e802fa3d67246d29f81856365ced3f9651914e2" exitCode=0 Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.160198 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mx87" event={"ID":"3d9d2a52-9bed-4e2c-a553-5069cbc5be00","Type":"ContainerDied","Data":"009a69332fa991899069ae864e802fa3d67246d29f81856365ced3f9651914e2"} Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.319108 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.350974 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-catalog-content\") pod \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.351036 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njtld\" (UniqueName: \"kubernetes.io/projected/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-kube-api-access-njtld\") pod \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.351090 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-utilities\") pod \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\" (UID: \"3d9d2a52-9bed-4e2c-a553-5069cbc5be00\") " Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.352516 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-utilities" (OuterVolumeSpecName: "utilities") pod "3d9d2a52-9bed-4e2c-a553-5069cbc5be00" (UID: "3d9d2a52-9bed-4e2c-a553-5069cbc5be00"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.352715 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.360832 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-kube-api-access-njtld" (OuterVolumeSpecName: "kube-api-access-njtld") pod "3d9d2a52-9bed-4e2c-a553-5069cbc5be00" (UID: "3d9d2a52-9bed-4e2c-a553-5069cbc5be00"). InnerVolumeSpecName "kube-api-access-njtld". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.381776 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3d9d2a52-9bed-4e2c-a553-5069cbc5be00" (UID: "3d9d2a52-9bed-4e2c-a553-5069cbc5be00"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.453945 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:42 crc kubenswrapper[4861]: I0129 06:48:42.453983 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njtld\" (UniqueName: \"kubernetes.io/projected/3d9d2a52-9bed-4e2c-a553-5069cbc5be00-kube-api-access-njtld\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:43 crc kubenswrapper[4861]: I0129 06:48:43.172022 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mx87" event={"ID":"3d9d2a52-9bed-4e2c-a553-5069cbc5be00","Type":"ContainerDied","Data":"d5ba503eac780c6f49dba66c477d28860ce9727bd6babd44f6ecb4c7c314f936"} Jan 29 06:48:43 crc kubenswrapper[4861]: I0129 06:48:43.172229 4861 scope.go:117] "RemoveContainer" containerID="009a69332fa991899069ae864e802fa3d67246d29f81856365ced3f9651914e2" Jan 29 06:48:43 crc kubenswrapper[4861]: I0129 06:48:43.172131 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5mx87" Jan 29 06:48:43 crc kubenswrapper[4861]: I0129 06:48:43.196861 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5mx87"] Jan 29 06:48:43 crc kubenswrapper[4861]: I0129 06:48:43.202018 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5mx87"] Jan 29 06:48:43 crc kubenswrapper[4861]: I0129 06:48:43.208726 4861 scope.go:117] "RemoveContainer" containerID="66947fb4488f358bcb0aa72e1ac6d54e83719de682534110431003dda7dd32b6" Jan 29 06:48:43 crc kubenswrapper[4861]: I0129 06:48:43.240011 4861 scope.go:117] "RemoveContainer" containerID="bf829649ede4976f553d8c0806f90a22649e88b73619cbe8d75c59cd53a3f61a" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.127919 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" path="/var/lib/kubelet/pods/3d9d2a52-9bed-4e2c-a553-5069cbc5be00/volumes" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.135865 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-hrbzh" podUID="3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" containerName="console" containerID="cri-o://d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec" gracePeriod=15 Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.225987 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7"] Jan 29 06:48:45 crc kubenswrapper[4861]: E0129 06:48:45.226343 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" containerName="extract-content" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.226373 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" containerName="extract-content" Jan 29 06:48:45 crc kubenswrapper[4861]: E0129 06:48:45.226391 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" containerName="extract-utilities" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.226405 4861 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" containerName="extract-utilities" Jan 29 06:48:45 crc kubenswrapper[4861]: E0129 06:48:45.226421 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" containerName="registry-server" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.226435 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" containerName="registry-server" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.226639 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d9d2a52-9bed-4e2c-a553-5069cbc5be00" containerName="registry-server" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.227972 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.230993 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.240706 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7"] Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.395339 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7\" (UID: \"6296013e-e8a8-4e37-b501-77ac33f4652d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.395499 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7\" (UID: \"6296013e-e8a8-4e37-b501-77ac33f4652d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.395536 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfxbf\" (UniqueName: \"kubernetes.io/projected/6296013e-e8a8-4e37-b501-77ac33f4652d-kube-api-access-mfxbf\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7\" (UID: \"6296013e-e8a8-4e37-b501-77ac33f4652d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.497029 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7\" (UID: \"6296013e-e8a8-4e37-b501-77ac33f4652d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.497466 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfxbf\" (UniqueName: \"kubernetes.io/projected/6296013e-e8a8-4e37-b501-77ac33f4652d-kube-api-access-mfxbf\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7\" (UID: 
\"6296013e-e8a8-4e37-b501-77ac33f4652d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.497591 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7\" (UID: \"6296013e-e8a8-4e37-b501-77ac33f4652d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.497764 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7\" (UID: \"6296013e-e8a8-4e37-b501-77ac33f4652d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.498485 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7\" (UID: \"6296013e-e8a8-4e37-b501-77ac33f4652d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.532590 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfxbf\" (UniqueName: \"kubernetes.io/projected/6296013e-e8a8-4e37-b501-77ac33f4652d-kube-api-access-mfxbf\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7\" (UID: \"6296013e-e8a8-4e37-b501-77ac33f4652d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.571706 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.640842 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-hrbzh_3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed/console/0.log" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.640959 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.800968 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-service-ca\") pod \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.801086 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-oauth-config\") pod \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.801183 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-config\") pod \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.801236 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-trusted-ca-bundle\") pod \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.801256 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-serving-cert\") pod \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.801291 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szm4q\" (UniqueName: \"kubernetes.io/projected/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-kube-api-access-szm4q\") pod \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.801356 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-oauth-serving-cert\") pod \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\" (UID: \"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed\") " Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.802671 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" (UID: "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.803256 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-config" (OuterVolumeSpecName: "console-config") pod "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" (UID: "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.803309 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-service-ca" (OuterVolumeSpecName: "service-ca") pod "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" (UID: "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.803323 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" (UID: "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.809223 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-kube-api-access-szm4q" (OuterVolumeSpecName: "kube-api-access-szm4q") pod "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" (UID: "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed"). InnerVolumeSpecName "kube-api-access-szm4q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.810292 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" (UID: "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.810539 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" (UID: "3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.835783 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7"] Jan 29 06:48:45 crc kubenswrapper[4861]: W0129 06:48:45.836377 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6296013e_e8a8_4e37_b501_77ac33f4652d.slice/crio-7c59b633de5f274b9b37c78467944d7fecdef122bd8863386c9141310e9547d2 WatchSource:0}: Error finding container 7c59b633de5f274b9b37c78467944d7fecdef122bd8863386c9141310e9547d2: Status 404 returned error can't find the container with id 7c59b633de5f274b9b37c78467944d7fecdef122bd8863386c9141310e9547d2 Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.902600 4861 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.902644 4861 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.902660 4861 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.902675 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szm4q\" (UniqueName: \"kubernetes.io/projected/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-kube-api-access-szm4q\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.902688 4861 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.902699 4861 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:45 crc kubenswrapper[4861]: I0129 06:48:45.902711 4861 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.203480 4861 generic.go:334] "Generic (PLEG): container finished" podID="6296013e-e8a8-4e37-b501-77ac33f4652d" containerID="c50f59350783529d04954e217d4bf8b0b894ab5c05e7932d3bc71a8e0a4b1fc7" exitCode=0 Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.203626 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" event={"ID":"6296013e-e8a8-4e37-b501-77ac33f4652d","Type":"ContainerDied","Data":"c50f59350783529d04954e217d4bf8b0b894ab5c05e7932d3bc71a8e0a4b1fc7"} Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.203667 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" 
event={"ID":"6296013e-e8a8-4e37-b501-77ac33f4652d","Type":"ContainerStarted","Data":"7c59b633de5f274b9b37c78467944d7fecdef122bd8863386c9141310e9547d2"} Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.208667 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-hrbzh_3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed/console/0.log" Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.208748 4861 generic.go:334] "Generic (PLEG): container finished" podID="3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" containerID="d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec" exitCode=2 Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.208801 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-hrbzh" event={"ID":"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed","Type":"ContainerDied","Data":"d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec"} Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.208837 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-hrbzh" Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.208870 4861 scope.go:117] "RemoveContainer" containerID="d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec" Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.208848 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-hrbzh" event={"ID":"3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed","Type":"ContainerDied","Data":"9f1f982f29279d544b208e1a85f5874ae7c8e34d4967fd433cc90e573b7dfeec"} Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.238692 4861 scope.go:117] "RemoveContainer" containerID="d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec" Jan 29 06:48:46 crc kubenswrapper[4861]: E0129 06:48:46.240691 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec\": container with ID starting with d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec not found: ID does not exist" containerID="d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec" Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.240757 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec"} err="failed to get container status \"d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec\": rpc error: code = NotFound desc = could not find container \"d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec\": container with ID starting with d6eb93a15a04c4dd1cceb7b6d5f95797308d286f04f4c4db6a12be27e7a28fec not found: ID does not exist" Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.260695 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-hrbzh"] Jan 29 06:48:46 crc kubenswrapper[4861]: I0129 06:48:46.267494 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-hrbzh"] Jan 29 06:48:47 crc kubenswrapper[4861]: I0129 06:48:47.127922 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" path="/var/lib/kubelet/pods/3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed/volumes" Jan 29 06:48:48 crc kubenswrapper[4861]: I0129 06:48:48.227564 4861 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" event={"ID":"6296013e-e8a8-4e37-b501-77ac33f4652d","Type":"ContainerStarted","Data":"263859a0f59ec1e7c03f8ee34ddf44740bffcaf29ecd21b8716f481e847b79f1"} Jan 29 06:48:49 crc kubenswrapper[4861]: I0129 06:48:49.240635 4861 generic.go:334] "Generic (PLEG): container finished" podID="6296013e-e8a8-4e37-b501-77ac33f4652d" containerID="263859a0f59ec1e7c03f8ee34ddf44740bffcaf29ecd21b8716f481e847b79f1" exitCode=0 Jan 29 06:48:49 crc kubenswrapper[4861]: I0129 06:48:49.240756 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" event={"ID":"6296013e-e8a8-4e37-b501-77ac33f4652d","Type":"ContainerDied","Data":"263859a0f59ec1e7c03f8ee34ddf44740bffcaf29ecd21b8716f481e847b79f1"} Jan 29 06:48:50 crc kubenswrapper[4861]: I0129 06:48:50.254904 4861 generic.go:334] "Generic (PLEG): container finished" podID="6296013e-e8a8-4e37-b501-77ac33f4652d" containerID="c1ff8d3035945c190eb0e59e8e64c44cc28d8f619e2d2b4ecae6c3a46d541b6b" exitCode=0 Jan 29 06:48:50 crc kubenswrapper[4861]: I0129 06:48:50.254956 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" event={"ID":"6296013e-e8a8-4e37-b501-77ac33f4652d","Type":"ContainerDied","Data":"c1ff8d3035945c190eb0e59e8e64c44cc28d8f619e2d2b4ecae6c3a46d541b6b"} Jan 29 06:48:51 crc kubenswrapper[4861]: I0129 06:48:51.551503 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:48:51 crc kubenswrapper[4861]: I0129 06:48:51.685715 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfxbf\" (UniqueName: \"kubernetes.io/projected/6296013e-e8a8-4e37-b501-77ac33f4652d-kube-api-access-mfxbf\") pod \"6296013e-e8a8-4e37-b501-77ac33f4652d\" (UID: \"6296013e-e8a8-4e37-b501-77ac33f4652d\") " Jan 29 06:48:51 crc kubenswrapper[4861]: I0129 06:48:51.685765 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-util\") pod \"6296013e-e8a8-4e37-b501-77ac33f4652d\" (UID: \"6296013e-e8a8-4e37-b501-77ac33f4652d\") " Jan 29 06:48:51 crc kubenswrapper[4861]: I0129 06:48:51.685791 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-bundle\") pod \"6296013e-e8a8-4e37-b501-77ac33f4652d\" (UID: \"6296013e-e8a8-4e37-b501-77ac33f4652d\") " Jan 29 06:48:51 crc kubenswrapper[4861]: I0129 06:48:51.688338 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-bundle" (OuterVolumeSpecName: "bundle") pod "6296013e-e8a8-4e37-b501-77ac33f4652d" (UID: "6296013e-e8a8-4e37-b501-77ac33f4652d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:48:51 crc kubenswrapper[4861]: I0129 06:48:51.696454 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6296013e-e8a8-4e37-b501-77ac33f4652d-kube-api-access-mfxbf" (OuterVolumeSpecName: "kube-api-access-mfxbf") pod "6296013e-e8a8-4e37-b501-77ac33f4652d" (UID: "6296013e-e8a8-4e37-b501-77ac33f4652d"). 
InnerVolumeSpecName "kube-api-access-mfxbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:48:51 crc kubenswrapper[4861]: I0129 06:48:51.723486 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-util" (OuterVolumeSpecName: "util") pod "6296013e-e8a8-4e37-b501-77ac33f4652d" (UID: "6296013e-e8a8-4e37-b501-77ac33f4652d"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:48:51 crc kubenswrapper[4861]: I0129 06:48:51.787568 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfxbf\" (UniqueName: \"kubernetes.io/projected/6296013e-e8a8-4e37-b501-77ac33f4652d-kube-api-access-mfxbf\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:51 crc kubenswrapper[4861]: I0129 06:48:51.787622 4861 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-util\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:51 crc kubenswrapper[4861]: I0129 06:48:51.787646 4861 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6296013e-e8a8-4e37-b501-77ac33f4652d-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:48:52 crc kubenswrapper[4861]: I0129 06:48:52.269622 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" event={"ID":"6296013e-e8a8-4e37-b501-77ac33f4652d","Type":"ContainerDied","Data":"7c59b633de5f274b9b37c78467944d7fecdef122bd8863386c9141310e9547d2"} Jan 29 06:48:52 crc kubenswrapper[4861]: I0129 06:48:52.270135 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c59b633de5f274b9b37c78467944d7fecdef122bd8863386c9141310e9547d2" Jan 29 06:48:52 crc kubenswrapper[4861]: I0129 06:48:52.269876 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.756169 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z"] Jan 29 06:49:00 crc kubenswrapper[4861]: E0129 06:49:00.756879 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6296013e-e8a8-4e37-b501-77ac33f4652d" containerName="util" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.756890 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6296013e-e8a8-4e37-b501-77ac33f4652d" containerName="util" Jan 29 06:49:00 crc kubenswrapper[4861]: E0129 06:49:00.756903 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6296013e-e8a8-4e37-b501-77ac33f4652d" containerName="pull" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.756908 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6296013e-e8a8-4e37-b501-77ac33f4652d" containerName="pull" Jan 29 06:49:00 crc kubenswrapper[4861]: E0129 06:49:00.756917 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6296013e-e8a8-4e37-b501-77ac33f4652d" containerName="extract" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.756923 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6296013e-e8a8-4e37-b501-77ac33f4652d" containerName="extract" Jan 29 06:49:00 crc kubenswrapper[4861]: E0129 06:49:00.756936 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" containerName="console" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.756941 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" containerName="console" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.757060 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="6296013e-e8a8-4e37-b501-77ac33f4652d" containerName="extract" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.757089 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c9a4ade-e4b4-4fc3-a69e-7e0f969ef4ed" containerName="console" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.757445 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.760898 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.760915 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.760972 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.763299 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-vjx79" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.763346 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.798487 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z"] Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.808032 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e68679d2-955e-4332-8692-ae753a55450c-apiservice-cert\") pod \"metallb-operator-controller-manager-6c8b689f68-d2g2z\" (UID: \"e68679d2-955e-4332-8692-ae753a55450c\") " pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.808088 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e68679d2-955e-4332-8692-ae753a55450c-webhook-cert\") pod \"metallb-operator-controller-manager-6c8b689f68-d2g2z\" (UID: \"e68679d2-955e-4332-8692-ae753a55450c\") " pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.808244 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z4w7\" (UniqueName: \"kubernetes.io/projected/e68679d2-955e-4332-8692-ae753a55450c-kube-api-access-5z4w7\") pod \"metallb-operator-controller-manager-6c8b689f68-d2g2z\" (UID: \"e68679d2-955e-4332-8692-ae753a55450c\") " pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.909403 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e68679d2-955e-4332-8692-ae753a55450c-apiservice-cert\") pod \"metallb-operator-controller-manager-6c8b689f68-d2g2z\" (UID: \"e68679d2-955e-4332-8692-ae753a55450c\") " pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.909468 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e68679d2-955e-4332-8692-ae753a55450c-webhook-cert\") pod \"metallb-operator-controller-manager-6c8b689f68-d2g2z\" (UID: \"e68679d2-955e-4332-8692-ae753a55450c\") " pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.909525 4861 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z4w7\" (UniqueName: \"kubernetes.io/projected/e68679d2-955e-4332-8692-ae753a55450c-kube-api-access-5z4w7\") pod \"metallb-operator-controller-manager-6c8b689f68-d2g2z\" (UID: \"e68679d2-955e-4332-8692-ae753a55450c\") " pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.918709 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e68679d2-955e-4332-8692-ae753a55450c-webhook-cert\") pod \"metallb-operator-controller-manager-6c8b689f68-d2g2z\" (UID: \"e68679d2-955e-4332-8692-ae753a55450c\") " pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.918725 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e68679d2-955e-4332-8692-ae753a55450c-apiservice-cert\") pod \"metallb-operator-controller-manager-6c8b689f68-d2g2z\" (UID: \"e68679d2-955e-4332-8692-ae753a55450c\") " pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.928624 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z4w7\" (UniqueName: \"kubernetes.io/projected/e68679d2-955e-4332-8692-ae753a55450c-kube-api-access-5z4w7\") pod \"metallb-operator-controller-manager-6c8b689f68-d2g2z\" (UID: \"e68679d2-955e-4332-8692-ae753a55450c\") " pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.987629 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r"] Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.988435 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.990529 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.994036 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 29 06:49:00 crc kubenswrapper[4861]: I0129 06:49:00.994040 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-zcc84" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.009850 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r"] Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.080908 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.112079 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/26677778-6a62-4e26-ae66-d7bab9bcdfe6-webhook-cert\") pod \"metallb-operator-webhook-server-b4bd6c689-wxp4r\" (UID: \"26677778-6a62-4e26-ae66-d7bab9bcdfe6\") " pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.112338 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5td7s\" (UniqueName: \"kubernetes.io/projected/26677778-6a62-4e26-ae66-d7bab9bcdfe6-kube-api-access-5td7s\") pod \"metallb-operator-webhook-server-b4bd6c689-wxp4r\" (UID: \"26677778-6a62-4e26-ae66-d7bab9bcdfe6\") " pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.112402 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/26677778-6a62-4e26-ae66-d7bab9bcdfe6-apiservice-cert\") pod \"metallb-operator-webhook-server-b4bd6c689-wxp4r\" (UID: \"26677778-6a62-4e26-ae66-d7bab9bcdfe6\") " pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.214624 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/26677778-6a62-4e26-ae66-d7bab9bcdfe6-webhook-cert\") pod \"metallb-operator-webhook-server-b4bd6c689-wxp4r\" (UID: \"26677778-6a62-4e26-ae66-d7bab9bcdfe6\") " pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.214677 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5td7s\" (UniqueName: \"kubernetes.io/projected/26677778-6a62-4e26-ae66-d7bab9bcdfe6-kube-api-access-5td7s\") pod \"metallb-operator-webhook-server-b4bd6c689-wxp4r\" (UID: \"26677778-6a62-4e26-ae66-d7bab9bcdfe6\") " pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.214727 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/26677778-6a62-4e26-ae66-d7bab9bcdfe6-apiservice-cert\") pod \"metallb-operator-webhook-server-b4bd6c689-wxp4r\" (UID: \"26677778-6a62-4e26-ae66-d7bab9bcdfe6\") " pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.245407 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/26677778-6a62-4e26-ae66-d7bab9bcdfe6-apiservice-cert\") pod \"metallb-operator-webhook-server-b4bd6c689-wxp4r\" (UID: \"26677778-6a62-4e26-ae66-d7bab9bcdfe6\") " pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.246469 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/26677778-6a62-4e26-ae66-d7bab9bcdfe6-webhook-cert\") pod \"metallb-operator-webhook-server-b4bd6c689-wxp4r\" (UID: \"26677778-6a62-4e26-ae66-d7bab9bcdfe6\") " 
pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.259446 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5td7s\" (UniqueName: \"kubernetes.io/projected/26677778-6a62-4e26-ae66-d7bab9bcdfe6-kube-api-access-5td7s\") pod \"metallb-operator-webhook-server-b4bd6c689-wxp4r\" (UID: \"26677778-6a62-4e26-ae66-d7bab9bcdfe6\") " pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.302375 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.343943 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z"] Jan 29 06:49:01 crc kubenswrapper[4861]: I0129 06:49:01.550552 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r"] Jan 29 06:49:01 crc kubenswrapper[4861]: W0129 06:49:01.554095 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod26677778_6a62_4e26_ae66_d7bab9bcdfe6.slice/crio-0624d9a33dea174c213c83c8855c0a46d105fa4dd7e6e5e441a32b5a2b1445d3 WatchSource:0}: Error finding container 0624d9a33dea174c213c83c8855c0a46d105fa4dd7e6e5e441a32b5a2b1445d3: Status 404 returned error can't find the container with id 0624d9a33dea174c213c83c8855c0a46d105fa4dd7e6e5e441a32b5a2b1445d3 Jan 29 06:49:02 crc kubenswrapper[4861]: I0129 06:49:02.333260 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" event={"ID":"26677778-6a62-4e26-ae66-d7bab9bcdfe6","Type":"ContainerStarted","Data":"0624d9a33dea174c213c83c8855c0a46d105fa4dd7e6e5e441a32b5a2b1445d3"} Jan 29 06:49:02 crc kubenswrapper[4861]: I0129 06:49:02.335285 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" event={"ID":"e68679d2-955e-4332-8692-ae753a55450c","Type":"ContainerStarted","Data":"582268a2e3af44085916ec908ba24c7bf515e3723accccbc5b238f7ebbffeebb"} Jan 29 06:49:05 crc kubenswrapper[4861]: I0129 06:49:05.356261 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" event={"ID":"e68679d2-955e-4332-8692-ae753a55450c","Type":"ContainerStarted","Data":"40658382bad11a99bc64673684cb6a2ae9b5e4f895f87b78f7051cb6b520e71d"} Jan 29 06:49:05 crc kubenswrapper[4861]: I0129 06:49:05.358011 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:05 crc kubenswrapper[4861]: I0129 06:49:05.390493 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" podStartSLOduration=2.3073271 podStartE2EDuration="5.390471849s" podCreationTimestamp="2026-01-29 06:49:00 +0000 UTC" firstStartedPulling="2026-01-29 06:49:01.363316907 +0000 UTC m=+833.034811464" lastFinishedPulling="2026-01-29 06:49:04.446461656 +0000 UTC m=+836.117956213" observedRunningTime="2026-01-29 06:49:05.385296192 +0000 UTC m=+837.056790759" watchObservedRunningTime="2026-01-29 06:49:05.390471849 +0000 UTC m=+837.061966406" Jan 29 06:49:06 crc 
kubenswrapper[4861]: I0129 06:49:06.364298 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" event={"ID":"26677778-6a62-4e26-ae66-d7bab9bcdfe6","Type":"ContainerStarted","Data":"1d0cb948d1773aef93b405457d8af3420ec976a909e5503a2cf7bf4f28d95173"} Jan 29 06:49:06 crc kubenswrapper[4861]: I0129 06:49:06.385208 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" podStartSLOduration=1.740478432 podStartE2EDuration="6.385190199s" podCreationTimestamp="2026-01-29 06:49:00 +0000 UTC" firstStartedPulling="2026-01-29 06:49:01.558792341 +0000 UTC m=+833.230286898" lastFinishedPulling="2026-01-29 06:49:06.203504108 +0000 UTC m=+837.874998665" observedRunningTime="2026-01-29 06:49:06.38221652 +0000 UTC m=+838.053711097" watchObservedRunningTime="2026-01-29 06:49:06.385190199 +0000 UTC m=+838.056684756" Jan 29 06:49:07 crc kubenswrapper[4861]: I0129 06:49:07.369661 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:21 crc kubenswrapper[4861]: I0129 06:49:21.312557 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-b4bd6c689-wxp4r" Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.340420 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8hhrf"] Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.344351 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.358898 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8hhrf"] Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.441316 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-utilities\") pod \"community-operators-8hhrf\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") " pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.441474 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-catalog-content\") pod \"community-operators-8hhrf\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") " pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.441520 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzbhg\" (UniqueName: \"kubernetes.io/projected/20d90439-f776-4d23-b376-a392b6c0448d-kube-api-access-dzbhg\") pod \"community-operators-8hhrf\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") " pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.543109 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-utilities\") pod \"community-operators-8hhrf\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") " pod="openshift-marketplace/community-operators-8hhrf" Jan 29 
06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.543213 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-catalog-content\") pod \"community-operators-8hhrf\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") " pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.543250 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzbhg\" (UniqueName: \"kubernetes.io/projected/20d90439-f776-4d23-b376-a392b6c0448d-kube-api-access-dzbhg\") pod \"community-operators-8hhrf\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") " pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.543641 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-utilities\") pod \"community-operators-8hhrf\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") " pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.543723 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-catalog-content\") pod \"community-operators-8hhrf\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") " pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.571197 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzbhg\" (UniqueName: \"kubernetes.io/projected/20d90439-f776-4d23-b376-a392b6c0448d-kube-api-access-dzbhg\") pod \"community-operators-8hhrf\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") " pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.667045 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:30 crc kubenswrapper[4861]: I0129 06:49:30.979059 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8hhrf"] Jan 29 06:49:31 crc kubenswrapper[4861]: I0129 06:49:31.549078 4861 generic.go:334] "Generic (PLEG): container finished" podID="20d90439-f776-4d23-b376-a392b6c0448d" containerID="e7935c20f737a0fa7c10ca76f971ec4896548fb2c6c5bbf41245d52e36ae90f7" exitCode=0 Jan 29 06:49:31 crc kubenswrapper[4861]: I0129 06:49:31.549248 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhrf" event={"ID":"20d90439-f776-4d23-b376-a392b6c0448d","Type":"ContainerDied","Data":"e7935c20f737a0fa7c10ca76f971ec4896548fb2c6c5bbf41245d52e36ae90f7"} Jan 29 06:49:31 crc kubenswrapper[4861]: I0129 06:49:31.549509 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhrf" event={"ID":"20d90439-f776-4d23-b376-a392b6c0448d","Type":"ContainerStarted","Data":"1d7d5789209f0b32a73b86400a1a86c2474b5200bd5d8600d7b2f8ebcbd578b9"} Jan 29 06:49:32 crc kubenswrapper[4861]: I0129 06:49:32.557520 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhrf" event={"ID":"20d90439-f776-4d23-b376-a392b6c0448d","Type":"ContainerStarted","Data":"46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e"} Jan 29 06:49:33 crc kubenswrapper[4861]: I0129 06:49:33.566374 4861 generic.go:334] "Generic (PLEG): container finished" podID="20d90439-f776-4d23-b376-a392b6c0448d" containerID="46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e" exitCode=0 Jan 29 06:49:33 crc kubenswrapper[4861]: I0129 06:49:33.566411 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhrf" event={"ID":"20d90439-f776-4d23-b376-a392b6c0448d","Type":"ContainerDied","Data":"46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e"} Jan 29 06:49:34 crc kubenswrapper[4861]: I0129 06:49:34.575183 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhrf" event={"ID":"20d90439-f776-4d23-b376-a392b6c0448d","Type":"ContainerStarted","Data":"57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82"} Jan 29 06:49:34 crc kubenswrapper[4861]: I0129 06:49:34.598286 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8hhrf" podStartSLOduration=2.143846351 podStartE2EDuration="4.598272245s" podCreationTimestamp="2026-01-29 06:49:30 +0000 UTC" firstStartedPulling="2026-01-29 06:49:31.551145028 +0000 UTC m=+863.222639595" lastFinishedPulling="2026-01-29 06:49:34.005570922 +0000 UTC m=+865.677065489" observedRunningTime="2026-01-29 06:49:34.597885995 +0000 UTC m=+866.269380552" watchObservedRunningTime="2026-01-29 06:49:34.598272245 +0000 UTC m=+866.269766802" Jan 29 06:49:40 crc kubenswrapper[4861]: I0129 06:49:40.667592 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:40 crc kubenswrapper[4861]: I0129 06:49:40.668189 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:40 crc kubenswrapper[4861]: I0129 06:49:40.738888 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.084477 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6c8b689f68-d2g2z" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.697703 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.833839 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-pwx9r"] Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.836454 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.840172 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.840534 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.841893 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7"] Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.842736 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.843184 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-pmmts" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.852733 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.863124 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7"] Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.908869 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/16a3011f-4927-45e1-8748-a8baf0db3e61-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-cq4j7\" (UID: \"16a3011f-4927-45e1-8748-a8baf0db3e61\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.908924 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-reloader\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.909056 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/83f5313a-3181-4f9a-a7b3-9dfbd14719be-metrics-certs\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.909143 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/83f5313a-3181-4f9a-a7b3-9dfbd14719be-frr-startup\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " 
pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.909163 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-frr-conf\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.909213 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vllb\" (UniqueName: \"kubernetes.io/projected/83f5313a-3181-4f9a-a7b3-9dfbd14719be-kube-api-access-9vllb\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.909255 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-frr-sockets\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.909296 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5h266\" (UniqueName: \"kubernetes.io/projected/16a3011f-4927-45e1-8748-a8baf0db3e61-kube-api-access-5h266\") pod \"frr-k8s-webhook-server-7df86c4f6c-cq4j7\" (UID: \"16a3011f-4927-45e1-8748-a8baf0db3e61\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.909320 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-metrics\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.978530 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-dfg5n"] Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.979379 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-dfg5n" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.982715 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-2phxb" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.983012 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.983167 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 29 06:49:41 crc kubenswrapper[4861]: I0129 06:49:41.983293 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010245 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/54091384-0c14-42be-a638-a8abb8171ad1-metallb-excludel2\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010299 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-metrics-certs\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010328 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/83f5313a-3181-4f9a-a7b3-9dfbd14719be-metrics-certs\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010354 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pttlq\" (UniqueName: \"kubernetes.io/projected/54091384-0c14-42be-a638-a8abb8171ad1-kube-api-access-pttlq\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010392 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/83f5313a-3181-4f9a-a7b3-9dfbd14719be-frr-startup\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010410 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-frr-conf\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010435 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-memberlist\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010454 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vllb\" (UniqueName: 
\"kubernetes.io/projected/83f5313a-3181-4f9a-a7b3-9dfbd14719be-kube-api-access-9vllb\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010479 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-frr-sockets\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010503 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5h266\" (UniqueName: \"kubernetes.io/projected/16a3011f-4927-45e1-8748-a8baf0db3e61-kube-api-access-5h266\") pod \"frr-k8s-webhook-server-7df86c4f6c-cq4j7\" (UID: \"16a3011f-4927-45e1-8748-a8baf0db3e61\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010527 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-metrics\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010558 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/16a3011f-4927-45e1-8748-a8baf0db3e61-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-cq4j7\" (UID: \"16a3011f-4927-45e1-8748-a8baf0db3e61\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010588 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-reloader\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.010981 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-reloader\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.011120 4861 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.011171 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/83f5313a-3181-4f9a-a7b3-9dfbd14719be-metrics-certs podName:83f5313a-3181-4f9a-a7b3-9dfbd14719be nodeName:}" failed. No retries permitted until 2026-01-29 06:49:42.511155134 +0000 UTC m=+874.182649691 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/83f5313a-3181-4f9a-a7b3-9dfbd14719be-metrics-certs") pod "frr-k8s-pwx9r" (UID: "83f5313a-3181-4f9a-a7b3-9dfbd14719be") : secret "frr-k8s-certs-secret" not found Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.012140 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/83f5313a-3181-4f9a-a7b3-9dfbd14719be-frr-startup\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.012327 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-frr-conf\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.012699 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-frr-sockets\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.013024 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/83f5313a-3181-4f9a-a7b3-9dfbd14719be-metrics\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.015931 4861 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.015983 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/16a3011f-4927-45e1-8748-a8baf0db3e61-cert podName:16a3011f-4927-45e1-8748-a8baf0db3e61 nodeName:}" failed. No retries permitted until 2026-01-29 06:49:42.51596625 +0000 UTC m=+874.187460807 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/16a3011f-4927-45e1-8748-a8baf0db3e61-cert") pod "frr-k8s-webhook-server-7df86c4f6c-cq4j7" (UID: "16a3011f-4927-45e1-8748-a8baf0db3e61") : secret "frr-k8s-webhook-server-cert" not found Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.028278 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-b58f5"] Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.034445 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.038667 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-b58f5"] Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.038950 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.044537 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vllb\" (UniqueName: \"kubernetes.io/projected/83f5313a-3181-4f9a-a7b3-9dfbd14719be-kube-api-access-9vllb\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.045339 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5h266\" (UniqueName: \"kubernetes.io/projected/16a3011f-4927-45e1-8748-a8baf0db3e61-kube-api-access-5h266\") pod \"frr-k8s-webhook-server-7df86c4f6c-cq4j7\" (UID: \"16a3011f-4927-45e1-8748-a8baf0db3e61\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.111134 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/55af9f43-c216-4c63-9ca4-564120262b41-cert\") pod \"controller-6968d8fdc4-b58f5\" (UID: \"55af9f43-c216-4c63-9ca4-564120262b41\") " pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.111516 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/54091384-0c14-42be-a638-a8abb8171ad1-metallb-excludel2\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.111534 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-metrics-certs\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.111564 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pttlq\" (UniqueName: \"kubernetes.io/projected/54091384-0c14-42be-a638-a8abb8171ad1-kube-api-access-pttlq\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.111598 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8rnp\" (UniqueName: \"kubernetes.io/projected/55af9f43-c216-4c63-9ca4-564120262b41-kube-api-access-w8rnp\") pod \"controller-6968d8fdc4-b58f5\" (UID: \"55af9f43-c216-4c63-9ca4-564120262b41\") " pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.111615 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-memberlist\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.111634 4861 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/55af9f43-c216-4c63-9ca4-564120262b41-metrics-certs\") pod \"controller-6968d8fdc4-b58f5\" (UID: \"55af9f43-c216-4c63-9ca4-564120262b41\") " pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.111760 4861 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.111823 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-metrics-certs podName:54091384-0c14-42be-a638-a8abb8171ad1 nodeName:}" failed. No retries permitted until 2026-01-29 06:49:42.611785591 +0000 UTC m=+874.283280148 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-metrics-certs") pod "speaker-dfg5n" (UID: "54091384-0c14-42be-a638-a8abb8171ad1") : secret "speaker-certs-secret" not found Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.112150 4861 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.112177 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-memberlist podName:54091384-0c14-42be-a638-a8abb8171ad1 nodeName:}" failed. No retries permitted until 2026-01-29 06:49:42.612170041 +0000 UTC m=+874.283664598 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-memberlist") pod "speaker-dfg5n" (UID: "54091384-0c14-42be-a638-a8abb8171ad1") : secret "metallb-memberlist" not found Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.112291 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/54091384-0c14-42be-a638-a8abb8171ad1-metallb-excludel2\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.131726 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pttlq\" (UniqueName: \"kubernetes.io/projected/54091384-0c14-42be-a638-a8abb8171ad1-kube-api-access-pttlq\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.212673 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8rnp\" (UniqueName: \"kubernetes.io/projected/55af9f43-c216-4c63-9ca4-564120262b41-kube-api-access-w8rnp\") pod \"controller-6968d8fdc4-b58f5\" (UID: \"55af9f43-c216-4c63-9ca4-564120262b41\") " pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.212741 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/55af9f43-c216-4c63-9ca4-564120262b41-metrics-certs\") pod \"controller-6968d8fdc4-b58f5\" (UID: \"55af9f43-c216-4c63-9ca4-564120262b41\") " pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.212829 4861 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/55af9f43-c216-4c63-9ca4-564120262b41-cert\") pod \"controller-6968d8fdc4-b58f5\" (UID: \"55af9f43-c216-4c63-9ca4-564120262b41\") " pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.212934 4861 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.213020 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/55af9f43-c216-4c63-9ca4-564120262b41-metrics-certs podName:55af9f43-c216-4c63-9ca4-564120262b41 nodeName:}" failed. No retries permitted until 2026-01-29 06:49:42.712997923 +0000 UTC m=+874.384492480 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/55af9f43-c216-4c63-9ca4-564120262b41-metrics-certs") pod "controller-6968d8fdc4-b58f5" (UID: "55af9f43-c216-4c63-9ca4-564120262b41") : secret "controller-certs-secret" not found Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.216006 4861 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.226735 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/55af9f43-c216-4c63-9ca4-564120262b41-cert\") pod \"controller-6968d8fdc4-b58f5\" (UID: \"55af9f43-c216-4c63-9ca4-564120262b41\") " pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.232952 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8rnp\" (UniqueName: \"kubernetes.io/projected/55af9f43-c216-4c63-9ca4-564120262b41-kube-api-access-w8rnp\") pod \"controller-6968d8fdc4-b58f5\" (UID: \"55af9f43-c216-4c63-9ca4-564120262b41\") " pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.518286 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/16a3011f-4927-45e1-8748-a8baf0db3e61-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-cq4j7\" (UID: \"16a3011f-4927-45e1-8748-a8baf0db3e61\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.518435 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/83f5313a-3181-4f9a-a7b3-9dfbd14719be-metrics-certs\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.523684 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/83f5313a-3181-4f9a-a7b3-9dfbd14719be-metrics-certs\") pod \"frr-k8s-pwx9r\" (UID: \"83f5313a-3181-4f9a-a7b3-9dfbd14719be\") " pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.524314 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/16a3011f-4927-45e1-8748-a8baf0db3e61-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-cq4j7\" (UID: \"16a3011f-4927-45e1-8748-a8baf0db3e61\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" 
Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.619680 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-memberlist\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.619873 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-metrics-certs\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.619893 4861 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 29 06:49:42 crc kubenswrapper[4861]: E0129 06:49:42.619975 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-memberlist podName:54091384-0c14-42be-a638-a8abb8171ad1 nodeName:}" failed. No retries permitted until 2026-01-29 06:49:43.619950957 +0000 UTC m=+875.291445584 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-memberlist") pod "speaker-dfg5n" (UID: "54091384-0c14-42be-a638-a8abb8171ad1") : secret "metallb-memberlist" not found Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.623699 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-metrics-certs\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.721842 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/55af9f43-c216-4c63-9ca4-564120262b41-metrics-certs\") pod \"controller-6968d8fdc4-b58f5\" (UID: \"55af9f43-c216-4c63-9ca4-564120262b41\") " pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.727707 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/55af9f43-c216-4c63-9ca4-564120262b41-metrics-certs\") pod \"controller-6968d8fdc4-b58f5\" (UID: \"55af9f43-c216-4c63-9ca4-564120262b41\") " pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.758339 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-pwx9r" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.765921 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" Jan 29 06:49:42 crc kubenswrapper[4861]: I0129 06:49:42.988890 4861 util.go:30] "No sandbox for pod can be found. 
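Note the backoff doubling across consecutive failures of the same volume: the first memberlist failure at 06:49:42.112 schedules a retry after 500ms, the second at 06:49:42.619 after 1s. That is consistent with exponential backoff starting at 500ms with factor 2; the cap in the sketch below is an assumption for illustration, not a quote of kubelet's constant:

    package main

    import (
        "fmt"
        "time"
    )

    // nextBackoff doubles durationBeforeRetry on each consecutive
    // failure, starting at 500ms and capped at maxBackoff (assumed).
    func nextBackoff(prev time.Duration) time.Duration {
        const maxBackoff = 2*time.Minute + 2*time.Second
        if prev == 0 {
            return 500 * time.Millisecond
        }
        next := prev * 2
        if next > maxBackoff {
            next = maxBackoff
        }
        return next
    }

    func main() {
        var d time.Duration
        for i := 1; i <= 10; i++ {
            d = nextBackoff(d)
            fmt.Printf("failure %d: retry after %v\n", i, d)
        }
        // Prints 500ms, 1s, 2s, ... matching the 500ms -> 1s
        // progression visible in the memberlist entries above.
    }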
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-b58f5" Jan 29 06:49:43 crc kubenswrapper[4861]: I0129 06:49:43.126453 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8hhrf"] Jan 29 06:49:43 crc kubenswrapper[4861]: I0129 06:49:43.224911 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-b58f5"] Jan 29 06:49:43 crc kubenswrapper[4861]: I0129 06:49:43.231259 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7"] Jan 29 06:49:43 crc kubenswrapper[4861]: I0129 06:49:43.633592 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-memberlist\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:43 crc kubenswrapper[4861]: I0129 06:49:43.636793 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-b58f5" event={"ID":"55af9f43-c216-4c63-9ca4-564120262b41","Type":"ContainerStarted","Data":"701406aacf2cf2287c9865fbb494cf8ee52d5a659d03f296517a1ce2353e5420"} Jan 29 06:49:43 crc kubenswrapper[4861]: I0129 06:49:43.636864 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-b58f5" event={"ID":"55af9f43-c216-4c63-9ca4-564120262b41","Type":"ContainerStarted","Data":"9d41f04bbefd673d61c4503b1cfc98b7b6b788a3bcf643e21123d67ea935fc94"} Jan 29 06:49:43 crc kubenswrapper[4861]: I0129 06:49:43.638145 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" event={"ID":"16a3011f-4927-45e1-8748-a8baf0db3e61","Type":"ContainerStarted","Data":"f1e34827f80c8263ade6a2fabe7d2880d806c655dfe5ed454661359eabb6af14"} Jan 29 06:49:43 crc kubenswrapper[4861]: I0129 06:49:43.639784 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8hhrf" podUID="20d90439-f776-4d23-b376-a392b6c0448d" containerName="registry-server" containerID="cri-o://57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82" gracePeriod=2 Jan 29 06:49:43 crc kubenswrapper[4861]: I0129 06:49:43.640284 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pwx9r" event={"ID":"83f5313a-3181-4f9a-a7b3-9dfbd14719be","Type":"ContainerStarted","Data":"740642adf9e99015300de9fc98e9077022b3a26b5bd7db33f814ff12d0f26803"} Jan 29 06:49:43 crc kubenswrapper[4861]: I0129 06:49:43.642950 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/54091384-0c14-42be-a638-a8abb8171ad1-memberlist\") pod \"speaker-dfg5n\" (UID: \"54091384-0c14-42be-a638-a8abb8171ad1\") " pod="metallb-system/speaker-dfg5n" Jan 29 06:49:43 crc kubenswrapper[4861]: I0129 06:49:43.806199 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 06:49:43 crc kubenswrapper[4861]: W0129 06:49:43.828516 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54091384_0c14_42be_a638_a8abb8171ad1.slice/crio-997f0d675c048f215b7e4b7f3026915a6a12789928f45e248945b6ed2d8c7fc9 WatchSource:0}: Error finding container 997f0d675c048f215b7e4b7f3026915a6a12789928f45e248945b6ed2d8c7fc9: Status 404 returned error can't find the container with id 997f0d675c048f215b7e4b7f3026915a6a12789928f45e248945b6ed2d8c7fc9
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.510850 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8hhrf"
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.552625 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzbhg\" (UniqueName: \"kubernetes.io/projected/20d90439-f776-4d23-b376-a392b6c0448d-kube-api-access-dzbhg\") pod \"20d90439-f776-4d23-b376-a392b6c0448d\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") "
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.552701 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-utilities\") pod \"20d90439-f776-4d23-b376-a392b6c0448d\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") "
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.552718 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-catalog-content\") pod \"20d90439-f776-4d23-b376-a392b6c0448d\" (UID: \"20d90439-f776-4d23-b376-a392b6c0448d\") "
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.554013 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-utilities" (OuterVolumeSpecName: "utilities") pod "20d90439-f776-4d23-b376-a392b6c0448d" (UID: "20d90439-f776-4d23-b376-a392b6c0448d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.560314 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20d90439-f776-4d23-b376-a392b6c0448d-kube-api-access-dzbhg" (OuterVolumeSpecName: "kube-api-access-dzbhg") pod "20d90439-f776-4d23-b376-a392b6c0448d" (UID: "20d90439-f776-4d23-b376-a392b6c0448d"). InnerVolumeSpecName "kube-api-access-dzbhg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.630468 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20d90439-f776-4d23-b376-a392b6c0448d" (UID: "20d90439-f776-4d23-b376-a392b6c0448d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.646873 4861 generic.go:334] "Generic (PLEG): container finished" podID="20d90439-f776-4d23-b376-a392b6c0448d" containerID="57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82" exitCode=0 Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.646927 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhrf" event={"ID":"20d90439-f776-4d23-b376-a392b6c0448d","Type":"ContainerDied","Data":"57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82"} Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.646950 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8hhrf" event={"ID":"20d90439-f776-4d23-b376-a392b6c0448d","Type":"ContainerDied","Data":"1d7d5789209f0b32a73b86400a1a86c2474b5200bd5d8600d7b2f8ebcbd578b9"} Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.646965 4861 scope.go:117] "RemoveContainer" containerID="57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82" Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.647054 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8hhrf" Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.653037 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-dfg5n" event={"ID":"54091384-0c14-42be-a638-a8abb8171ad1","Type":"ContainerStarted","Data":"8c90fa3d8ed1a49ce33a6cbbf07bcb59095050022f181bff71ee264287b42fa3"} Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.653102 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-dfg5n" event={"ID":"54091384-0c14-42be-a638-a8abb8171ad1","Type":"ContainerStarted","Data":"27103b9bf4f100d28f52417f91eeb00165fa8a907dddc211be9c85d7a265068a"} Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.653117 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-dfg5n" event={"ID":"54091384-0c14-42be-a638-a8abb8171ad1","Type":"ContainerStarted","Data":"997f0d675c048f215b7e4b7f3026915a6a12789928f45e248945b6ed2d8c7fc9"} Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.653554 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-dfg5n" Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.653801 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.655299 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20d90439-f776-4d23-b376-a392b6c0448d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.655336 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzbhg\" (UniqueName: \"kubernetes.io/projected/20d90439-f776-4d23-b376-a392b6c0448d-kube-api-access-dzbhg\") on node \"crc\" DevicePath \"\"" Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.656025 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-b58f5" event={"ID":"55af9f43-c216-4c63-9ca4-564120262b41","Type":"ContainerStarted","Data":"468edf98c1ecb52a76d2d267a9586a96d2b435028f083200d0e9ba25e1bb5734"} Jan 29 
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.656212 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-b58f5"
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.672901 4861 scope.go:117] "RemoveContainer" containerID="46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e"
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.675022 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-dfg5n" podStartSLOduration=3.675009876 podStartE2EDuration="3.675009876s" podCreationTimestamp="2026-01-29 06:49:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:49:44.671680479 +0000 UTC m=+876.343175036" watchObservedRunningTime="2026-01-29 06:49:44.675009876 +0000 UTC m=+876.346504433"
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.693766 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8hhrf"]
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.703877 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8hhrf"]
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.707957 4861 scope.go:117] "RemoveContainer" containerID="e7935c20f737a0fa7c10ca76f971ec4896548fb2c6c5bbf41245d52e36ae90f7"
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.717361 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-b58f5" podStartSLOduration=3.717341731 podStartE2EDuration="3.717341731s" podCreationTimestamp="2026-01-29 06:49:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:49:44.708270884 +0000 UTC m=+876.379765451" watchObservedRunningTime="2026-01-29 06:49:44.717341731 +0000 UTC m=+876.388836288"
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.745303 4861 scope.go:117] "RemoveContainer" containerID="57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82"
Jan 29 06:49:44 crc kubenswrapper[4861]: E0129 06:49:44.745956 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82\": container with ID starting with 57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82 not found: ID does not exist" containerID="57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82"
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.745991 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82"} err="failed to get container status \"57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82\": rpc error: code = NotFound desc = could not find container \"57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82\": container with ID starting with 57818024b44b81c16cde3798055b1bf0677fc7668db7f2fb6ce92947368d4c82 not found: ID does not exist"
Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.746012 4861 scope.go:117] "RemoveContainer" containerID="46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e"
Jan 29 06:49:44 crc kubenswrapper[4861]: E0129 06:49:44.746429 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e\": container with ID starting with 46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e not found: ID does not exist" containerID="46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e"
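Note: the RemoveContainer / "ContainerStatus from runtime service failed ... NotFound" exchanges above and below are benign: the container has already been removed, so the follow-up status lookup races with the kubelet's own cleanup. Callers of a gRPC runtime API typically classify NotFound as "already gone" rather than as a failure. A hedged sketch of that classification (the remove callback here is simulated, not the actual CRI client):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeIfPresent treats NotFound as success so container removal
    // stays idempotent under races like the one in the log.
    func removeIfPresent(remove func(id string) error, id string) error {
        err := remove(id)
        if err == nil || status.Code(err) == codes.NotFound {
            return nil // already gone: nothing left to do
        }
        return fmt.Errorf("removing container %s: %w", id, err)
    }

    func main() {
        // Simulated runtime that reports the container as already deleted.
        remove := func(id string) error {
            return status.Error(codes.NotFound, "could not find container "+id)
        }
        fmt.Println(removeIfPresent(remove, "57818024b44b")) // <nil>
    }
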
err="rpc error: code = NotFound desc = could not find container \"46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e\": container with ID starting with 46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e not found: ID does not exist" containerID="46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e" Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.746473 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e"} err="failed to get container status \"46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e\": rpc error: code = NotFound desc = could not find container \"46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e\": container with ID starting with 46e8f8d52efec838e1785f2bb23aeb800125b495c440a86900ae3abb6321138e not found: ID does not exist" Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.746501 4861 scope.go:117] "RemoveContainer" containerID="e7935c20f737a0fa7c10ca76f971ec4896548fb2c6c5bbf41245d52e36ae90f7" Jan 29 06:49:44 crc kubenswrapper[4861]: E0129 06:49:44.746773 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7935c20f737a0fa7c10ca76f971ec4896548fb2c6c5bbf41245d52e36ae90f7\": container with ID starting with e7935c20f737a0fa7c10ca76f971ec4896548fb2c6c5bbf41245d52e36ae90f7 not found: ID does not exist" containerID="e7935c20f737a0fa7c10ca76f971ec4896548fb2c6c5bbf41245d52e36ae90f7" Jan 29 06:49:44 crc kubenswrapper[4861]: I0129 06:49:44.746819 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7935c20f737a0fa7c10ca76f971ec4896548fb2c6c5bbf41245d52e36ae90f7"} err="failed to get container status \"e7935c20f737a0fa7c10ca76f971ec4896548fb2c6c5bbf41245d52e36ae90f7\": rpc error: code = NotFound desc = could not find container \"e7935c20f737a0fa7c10ca76f971ec4896548fb2c6c5bbf41245d52e36ae90f7\": container with ID starting with e7935c20f737a0fa7c10ca76f971ec4896548fb2c6c5bbf41245d52e36ae90f7 not found: ID does not exist" Jan 29 06:49:45 crc kubenswrapper[4861]: I0129 06:49:45.139683 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20d90439-f776-4d23-b376-a392b6c0448d" path="/var/lib/kubelet/pods/20d90439-f776-4d23-b376-a392b6c0448d/volumes" Jan 29 06:49:51 crc kubenswrapper[4861]: I0129 06:49:51.710305 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" event={"ID":"16a3011f-4927-45e1-8748-a8baf0db3e61","Type":"ContainerStarted","Data":"21f4b67d249e6bb9f6731de3e9f7a56f6f45598a6a2ea81f1ce07661d5351339"} Jan 29 06:49:51 crc kubenswrapper[4861]: I0129 06:49:51.711069 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" Jan 29 06:49:51 crc kubenswrapper[4861]: I0129 06:49:51.712377 4861 generic.go:334] "Generic (PLEG): container finished" podID="83f5313a-3181-4f9a-a7b3-9dfbd14719be" containerID="871bd615a1eccc3ca7b1389a716b2e5ad67c32a350f283b0ceedde21cc7e169a" exitCode=0 Jan 29 06:49:51 crc kubenswrapper[4861]: I0129 06:49:51.712422 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pwx9r" event={"ID":"83f5313a-3181-4f9a-a7b3-9dfbd14719be","Type":"ContainerDied","Data":"871bd615a1eccc3ca7b1389a716b2e5ad67c32a350f283b0ceedde21cc7e169a"} Jan 29 06:49:51 crc kubenswrapper[4861]: I0129 
Jan 29 06:49:51 crc kubenswrapper[4861]: I0129 06:49:51.734151 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7" podStartSLOduration=3.197198433 podStartE2EDuration="10.734127269s" podCreationTimestamp="2026-01-29 06:49:41 +0000 UTC" firstStartedPulling="2026-01-29 06:49:43.22450988 +0000 UTC m=+874.896004437" lastFinishedPulling="2026-01-29 06:49:50.761438696 +0000 UTC m=+882.432933273" observedRunningTime="2026-01-29 06:49:51.731519571 +0000 UTC m=+883.403014168" watchObservedRunningTime="2026-01-29 06:49:51.734127269 +0000 UTC m=+883.405621866"
Jan 29 06:49:52 crc kubenswrapper[4861]: I0129 06:49:52.721918 4861 generic.go:334] "Generic (PLEG): container finished" podID="83f5313a-3181-4f9a-a7b3-9dfbd14719be" containerID="c1d45072541d50533fe4fadcc25e46811f9d66aca9d930edfed325975e5eede8" exitCode=0
Jan 29 06:49:52 crc kubenswrapper[4861]: I0129 06:49:52.721979 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pwx9r" event={"ID":"83f5313a-3181-4f9a-a7b3-9dfbd14719be","Type":"ContainerDied","Data":"c1d45072541d50533fe4fadcc25e46811f9d66aca9d930edfed325975e5eede8"}
Jan 29 06:49:53 crc kubenswrapper[4861]: I0129 06:49:53.731205 4861 generic.go:334] "Generic (PLEG): container finished" podID="83f5313a-3181-4f9a-a7b3-9dfbd14719be" containerID="4f185445c24e5297cd88751d86a9b45981f61cd258b7e0ad6106099b1e565908" exitCode=0
Jan 29 06:49:53 crc kubenswrapper[4861]: I0129 06:49:53.731266 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pwx9r" event={"ID":"83f5313a-3181-4f9a-a7b3-9dfbd14719be","Type":"ContainerDied","Data":"4f185445c24e5297cd88751d86a9b45981f61cd258b7e0ad6106099b1e565908"}
Jan 29 06:49:54 crc kubenswrapper[4861]: I0129 06:49:54.749122 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pwx9r" event={"ID":"83f5313a-3181-4f9a-a7b3-9dfbd14719be","Type":"ContainerStarted","Data":"d74ca0f118e54e6a03c8da06b62f84e610b104cdc3ad02a92fb9bb6616bddece"}
Jan 29 06:49:54 crc kubenswrapper[4861]: I0129 06:49:54.749602 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pwx9r" event={"ID":"83f5313a-3181-4f9a-a7b3-9dfbd14719be","Type":"ContainerStarted","Data":"b12e866e203007a36da9f2b8ea1739973db7a2db32410757142dad0283a616d0"}
Jan 29 06:49:54 crc kubenswrapper[4861]: I0129 06:49:54.749634 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pwx9r" event={"ID":"83f5313a-3181-4f9a-a7b3-9dfbd14719be","Type":"ContainerStarted","Data":"9e370c3709f9b299a61fb37fd18cbb11fb4a250cff0ffed07bba1e51f3e3d8eb"}
Jan 29 06:49:54 crc kubenswrapper[4861]: I0129 06:49:54.749662 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pwx9r" event={"ID":"83f5313a-3181-4f9a-a7b3-9dfbd14719be","Type":"ContainerStarted","Data":"71c48d0b95cb2dce96cbe56d06f1ed30a11121a731655722fa465789207d15cb"}
Jan 29 06:49:54 crc kubenswrapper[4861]: I0129 06:49:54.749684 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pwx9r" event={"ID":"83f5313a-3181-4f9a-a7b3-9dfbd14719be","Type":"ContainerStarted","Data":"3c877ffeef6d662f580e757c260c4b737674779edc043fa073eb717047441dbc"}
Jan 29 06:49:55 crc kubenswrapper[4861]: I0129 06:49:55.766682 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pwx9r" event={"ID":"83f5313a-3181-4f9a-a7b3-9dfbd14719be","Type":"ContainerStarted","Data":"606d6ec0ef7ea44d7038ba558c23f81573b3ed111755089eecc89990b4cd6ea7"}
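Note: pod_startup_latency_tracker reports two figures. podStartE2EDuration is observedRunningTime minus podCreationTimestamp; podStartSLOduration additionally excludes image-pull time. For frr-k8s-webhook-server above: 10.734127269s of end-to-end time, minus roughly 7.536929s spent pulling (06:49:50.761438696 − 06:49:43.22450988), gives about 3.197198s, matching the logged podStartSLOduration. A sketch of that arithmetic on the timestamps from the log (the residual nanoseconds differ because the tracker uses its own internal clock readings):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        parse := func(s string) time.Time {
            t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
            if err != nil {
                panic(err)
            }
            return t
        }
        created := parse("2026-01-29 06:49:41 +0000 UTC")
        firstPull := parse("2026-01-29 06:49:43.22450988 +0000 UTC")
        lastPull := parse("2026-01-29 06:49:50.761438696 +0000 UTC")
        running := parse("2026-01-29 06:49:51.734127269 +0000 UTC")

        e2e := running.Sub(created)
        slo := e2e - lastPull.Sub(firstPull) // startup latency with image pulls excluded
        fmt.Println("podStartE2EDuration:", e2e) // 10.734127269s
        fmt.Println("podStartSLOduration:", slo) // ~3.197198s
    }
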
Jan 29 06:49:55 crc kubenswrapper[4861]: I0129 06:49:55.767246 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-pwx9r"
Jan 29 06:49:55 crc kubenswrapper[4861]: I0129 06:49:55.807245 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-pwx9r" podStartSLOduration=7.073665911 podStartE2EDuration="14.807222521s" podCreationTimestamp="2026-01-29 06:49:41 +0000 UTC" firstStartedPulling="2026-01-29 06:49:42.995224504 +0000 UTC m=+874.666719101" lastFinishedPulling="2026-01-29 06:49:50.728781144 +0000 UTC m=+882.400275711" observedRunningTime="2026-01-29 06:49:55.801374078 +0000 UTC m=+887.472868715" watchObservedRunningTime="2026-01-29 06:49:55.807222521 +0000 UTC m=+887.478717098"
Jan 29 06:49:57 crc kubenswrapper[4861]: I0129 06:49:57.758760 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-pwx9r"
Jan 29 06:49:57 crc kubenswrapper[4861]: I0129 06:49:57.799684 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-pwx9r"
Jan 29 06:50:00 crc kubenswrapper[4861]: I0129 06:50:00.630187 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 06:50:00 crc kubenswrapper[4861]: I0129 06:50:00.630564 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 06:50:02 crc kubenswrapper[4861]: I0129 06:50:02.775932 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cq4j7"
Jan 29 06:50:02 crc kubenswrapper[4861]: I0129 06:50:02.996165 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-b58f5"
Jan 29 06:50:03 crc kubenswrapper[4861]: I0129 06:50:03.810439 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-dfg5n"
Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.186797 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg"]
Jan 29 06:50:05 crc kubenswrapper[4861]: E0129 06:50:05.187127 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20d90439-f776-4d23-b376-a392b6c0448d" containerName="registry-server"
Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.187140 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="20d90439-f776-4d23-b376-a392b6c0448d" containerName="registry-server"
Jan 29 06:50:05 crc kubenswrapper[4861]: E0129 06:50:05.187154 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20d90439-f776-4d23-b376-a392b6c0448d" containerName="extract-content"
Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.187161 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="20d90439-f776-4d23-b376-a392b6c0448d" containerName="extract-content"
Jan 29 06:50:05 crc kubenswrapper[4861]: E0129 06:50:05.187173 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20d90439-f776-4d23-b376-a392b6c0448d" containerName="extract-utilities"
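Note: the recurring machine-config-daemon records above are a liveness probe failing with "connection refused" on 127.0.0.1:8798, i.e. nothing is listening on that port when the kubelet probes it. An HTTP liveness probe is essentially a GET with a deadline; a minimal sketch of one check (not the kubelet's prober itself):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probe performs one HTTP liveness check: any transport error (such as
    // connection refused) or an error-class status code is a failure.
    func probe(url string, timeout time.Duration) error {
        client := &http.Client{Timeout: timeout}
        resp, err := client.Get(url)
        if err != nil {
            return err // e.g. "dial tcp 127.0.0.1:8798: connect: connection refused"
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("unexpected status %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        fmt.Println(probe("http://127.0.0.1:8798/health", time.Second))
    }
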
podUID="20d90439-f776-4d23-b376-a392b6c0448d" containerName="extract-utilities" Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.187181 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="20d90439-f776-4d23-b376-a392b6c0448d" containerName="extract-utilities" Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.187321 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="20d90439-f776-4d23-b376-a392b6c0448d" containerName="registry-server" Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.188269 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.191823 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.202863 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg"] Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.301865 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg\" (UID: \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.302019 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54czq\" (UniqueName: \"kubernetes.io/projected/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-kube-api-access-54czq\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg\" (UID: \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.302060 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg\" (UID: \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.403898 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg\" (UID: \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.404066 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54czq\" (UniqueName: \"kubernetes.io/projected/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-kube-api-access-54czq\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg\" (UID: \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.404142 4861 
Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.404590 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg\" (UID: \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg"
Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.404778 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg\" (UID: \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg"
Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.421687 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54czq\" (UniqueName: \"kubernetes.io/projected/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-kube-api-access-54czq\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg\" (UID: \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg"
Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.521490 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg"
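Note: the reconciler_common traffic around here follows the kubelet volume manager's pattern: a desired state of world (derived from pod specs) is compared against an actual state of world, and VerifyControllerAttachedVolume / MountVolume / UnmountVolume operations are issued for the differences. A toy reconcile loop under that model (the names and set representation are invented for illustration):

    package main

    import "fmt"

    // reconcile mounts volumes that are desired but not yet actual, and
    // unmounts volumes that are actual but no longer desired - the same
    // shape as the MountVolume/UnmountVolume traffic in this log.
    func reconcile(desired, actual map[string]bool) (mounts, unmounts []string) {
        for v := range desired {
            if !actual[v] {
                mounts = append(mounts, v)
            }
        }
        for v := range actual {
            if !desired[v] {
                unmounts = append(unmounts, v)
            }
        }
        return
    }

    func main() {
        desired := map[string]bool{"util": true, "bundle": true, "kube-api-access-54czq": true}
        actual := map[string]bool{"util": true}
        m, u := reconcile(desired, actual)
        fmt.Println("mount:", m, "unmount:", u)
    }
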
Jan 29 06:50:05 crc kubenswrapper[4861]: I0129 06:50:05.947415 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg"]
Jan 29 06:50:06 crc kubenswrapper[4861]: I0129 06:50:06.850587 4861 generic.go:334] "Generic (PLEG): container finished" podID="e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" containerID="7f975779edc82a3fdcaaefac6ab321610d0597471b2a08b92359595904b91d24" exitCode=0
Jan 29 06:50:06 crc kubenswrapper[4861]: I0129 06:50:06.850672 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" event={"ID":"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6","Type":"ContainerDied","Data":"7f975779edc82a3fdcaaefac6ab321610d0597471b2a08b92359595904b91d24"}
Jan 29 06:50:06 crc kubenswrapper[4861]: I0129 06:50:06.850850 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" event={"ID":"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6","Type":"ContainerStarted","Data":"7759daecc280632dc59aa94c55dc8956e04062dbfcc18339bd30979f2291e2e5"}
Jan 29 06:50:11 crc kubenswrapper[4861]: I0129 06:50:11.892045 4861 generic.go:334] "Generic (PLEG): container finished" podID="e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" containerID="4b1c399fd295e874d011aa797f9c1e483028476cfd9703f9e6350362a69ba3ec" exitCode=0
Jan 29 06:50:11 crc kubenswrapper[4861]: I0129 06:50:11.892217 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" event={"ID":"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6","Type":"ContainerDied","Data":"4b1c399fd295e874d011aa797f9c1e483028476cfd9703f9e6350362a69ba3ec"}
Jan 29 06:50:12 crc kubenswrapper[4861]: I0129 06:50:12.763063 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-pwx9r"
Jan 29 06:50:12 crc kubenswrapper[4861]: I0129 06:50:12.905262 4861 generic.go:334] "Generic (PLEG): container finished" podID="e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" containerID="f5f99810ceddf55a72e0e44d6c455dfb6b873a7b864003f4990785ef5dbb4272" exitCode=0
Jan 29 06:50:12 crc kubenswrapper[4861]: I0129 06:50:12.905406 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" event={"ID":"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6","Type":"ContainerDied","Data":"f5f99810ceddf55a72e0e44d6c455dfb6b873a7b864003f4990785ef5dbb4272"}
Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.307925 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg"
Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.440887 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-util\") pod \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\" (UID: \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\") "
Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.441186 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54czq\" (UniqueName: \"kubernetes.io/projected/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-kube-api-access-54czq\") pod \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\" (UID: \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\") "
Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.441365 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-bundle\") pod \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\" (UID: \"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6\") "
Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.442570 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-bundle" (OuterVolumeSpecName: "bundle") pod "e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" (UID: "e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.453296 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-kube-api-access-54czq" (OuterVolumeSpecName: "kube-api-access-54czq") pod "e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" (UID: "e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6"). InnerVolumeSpecName "kube-api-access-54czq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.455675 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-util" (OuterVolumeSpecName: "util") pod "e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" (UID: "e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.542885 4861 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-util\") on node \"crc\" DevicePath \"\"" Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.542936 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54czq\" (UniqueName: \"kubernetes.io/projected/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-kube-api-access-54czq\") on node \"crc\" DevicePath \"\"" Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.542956 4861 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.922626 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" event={"ID":"e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6","Type":"ContainerDied","Data":"7759daecc280632dc59aa94c55dc8956e04062dbfcc18339bd30979f2291e2e5"} Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.923099 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7759daecc280632dc59aa94c55dc8956e04062dbfcc18339bd30979f2291e2e5" Jan 29 06:50:14 crc kubenswrapper[4861]: I0129 06:50:14.922700 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg" Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.857852 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz"] Jan 29 06:50:19 crc kubenswrapper[4861]: E0129 06:50:19.859039 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" containerName="pull" Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.859101 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" containerName="pull" Jan 29 06:50:19 crc kubenswrapper[4861]: E0129 06:50:19.859124 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" containerName="util" Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.859137 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" containerName="util" Jan 29 06:50:19 crc kubenswrapper[4861]: E0129 06:50:19.859194 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" containerName="extract" Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.859208 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" containerName="extract" Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.859551 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6" containerName="extract" Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.860428 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.863829 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt"
Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.863986 4861 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-5z584"
Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.882751 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz"]
Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.883099 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt"
Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.921432 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t5gc\" (UniqueName: \"kubernetes.io/projected/33b2649d-eb8c-451f-b184-f931fd61d0c5-kube-api-access-7t5gc\") pod \"cert-manager-operator-controller-manager-66c8bdd694-vt8gz\" (UID: \"33b2649d-eb8c-451f-b184-f931fd61d0c5\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz"
Jan 29 06:50:19 crc kubenswrapper[4861]: I0129 06:50:19.921537 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/33b2649d-eb8c-451f-b184-f931fd61d0c5-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-vt8gz\" (UID: \"33b2649d-eb8c-451f-b184-f931fd61d0c5\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz"
Jan 29 06:50:20 crc kubenswrapper[4861]: I0129 06:50:20.023335 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t5gc\" (UniqueName: \"kubernetes.io/projected/33b2649d-eb8c-451f-b184-f931fd61d0c5-kube-api-access-7t5gc\") pod \"cert-manager-operator-controller-manager-66c8bdd694-vt8gz\" (UID: \"33b2649d-eb8c-451f-b184-f931fd61d0c5\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz"
Jan 29 06:50:20 crc kubenswrapper[4861]: I0129 06:50:20.023505 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/33b2649d-eb8c-451f-b184-f931fd61d0c5-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-vt8gz\" (UID: \"33b2649d-eb8c-451f-b184-f931fd61d0c5\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz"
Jan 29 06:50:20 crc kubenswrapper[4861]: I0129 06:50:20.024229 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/33b2649d-eb8c-451f-b184-f931fd61d0c5-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-vt8gz\" (UID: \"33b2649d-eb8c-451f-b184-f931fd61d0c5\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz"
Jan 29 06:50:20 crc kubenswrapper[4861]: I0129 06:50:20.049183 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t5gc\" (UniqueName: \"kubernetes.io/projected/33b2649d-eb8c-451f-b184-f931fd61d0c5-kube-api-access-7t5gc\") pod \"cert-manager-operator-controller-manager-66c8bdd694-vt8gz\" (UID: \"33b2649d-eb8c-451f-b184-f931fd61d0c5\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz"
pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz" Jan 29 06:50:20 crc kubenswrapper[4861]: I0129 06:50:20.190752 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz" Jan 29 06:50:20 crc kubenswrapper[4861]: I0129 06:50:20.787765 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz"] Jan 29 06:50:20 crc kubenswrapper[4861]: I0129 06:50:20.963603 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz" event={"ID":"33b2649d-eb8c-451f-b184-f931fd61d0c5","Type":"ContainerStarted","Data":"e5a0201a1e4d5c4d3ae2868a3b9e76272fe723ad140d48e7332842f21b769153"} Jan 29 06:50:23 crc kubenswrapper[4861]: I0129 06:50:23.982926 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz" event={"ID":"33b2649d-eb8c-451f-b184-f931fd61d0c5","Type":"ContainerStarted","Data":"cea23b6168774fa7f63ba9d1258c05ff01500d3f880cf3d242f32f0be3a609e1"} Jan 29 06:50:24 crc kubenswrapper[4861]: I0129 06:50:24.014342 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-vt8gz" podStartSLOduration=2.092474078 podStartE2EDuration="5.014326574s" podCreationTimestamp="2026-01-29 06:50:19 +0000 UTC" firstStartedPulling="2026-01-29 06:50:20.800670831 +0000 UTC m=+912.472165388" lastFinishedPulling="2026-01-29 06:50:23.722523297 +0000 UTC m=+915.394017884" observedRunningTime="2026-01-29 06:50:24.01379302 +0000 UTC m=+915.685287627" watchObservedRunningTime="2026-01-29 06:50:24.014326574 +0000 UTC m=+915.685821131" Jan 29 06:50:27 crc kubenswrapper[4861]: I0129 06:50:27.956782 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-r8stv"] Jan 29 06:50:27 crc kubenswrapper[4861]: I0129 06:50:27.958506 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 06:50:27 crc kubenswrapper[4861]: I0129 06:50:27.961914 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Jan 29 06:50:27 crc kubenswrapper[4861]: I0129 06:50:27.962870 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Jan 29 06:50:27 crc kubenswrapper[4861]: I0129 06:50:27.964063 4861 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-lbmlh"
Jan 29 06:50:28 crc kubenswrapper[4861]: I0129 06:50:28.014819 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-r8stv"]
Jan 29 06:50:28 crc kubenswrapper[4861]: I0129 06:50:28.149683 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs2fc\" (UniqueName: \"kubernetes.io/projected/480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf-kube-api-access-bs2fc\") pod \"cert-manager-webhook-6888856db4-r8stv\" (UID: \"480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf\") " pod="cert-manager/cert-manager-webhook-6888856db4-r8stv"
Jan 29 06:50:28 crc kubenswrapper[4861]: I0129 06:50:28.149734 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-r8stv\" (UID: \"480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf\") " pod="cert-manager/cert-manager-webhook-6888856db4-r8stv"
Jan 29 06:50:28 crc kubenswrapper[4861]: I0129 06:50:28.251463 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs2fc\" (UniqueName: \"kubernetes.io/projected/480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf-kube-api-access-bs2fc\") pod \"cert-manager-webhook-6888856db4-r8stv\" (UID: \"480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf\") " pod="cert-manager/cert-manager-webhook-6888856db4-r8stv"
Jan 29 06:50:28 crc kubenswrapper[4861]: I0129 06:50:28.251587 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-r8stv\" (UID: \"480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf\") " pod="cert-manager/cert-manager-webhook-6888856db4-r8stv"
Jan 29 06:50:28 crc kubenswrapper[4861]: I0129 06:50:28.284121 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-r8stv\" (UID: \"480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf\") " pod="cert-manager/cert-manager-webhook-6888856db4-r8stv"
Jan 29 06:50:28 crc kubenswrapper[4861]: I0129 06:50:28.284745 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs2fc\" (UniqueName: \"kubernetes.io/projected/480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf-kube-api-access-bs2fc\") pod \"cert-manager-webhook-6888856db4-r8stv\" (UID: \"480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf\") " pod="cert-manager/cert-manager-webhook-6888856db4-r8stv"
Jan 29 06:50:28 crc kubenswrapper[4861]: I0129 06:50:28.317531 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-6888856db4-r8stv"
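Note: the reflector.go "Caches populated" lines mark the kubelet's informers warming up local caches of the ConfigMaps and Secrets a new namespace's pods will mount. With client-go, waiting for that sync before acting on cached objects is the usual pattern; a compressed sketch (kubeconfig at the default location is assumed, and the namespace is taken from the log):

    package main

    import (
        "fmt"
        "time"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/cache"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        cs := kubernetes.NewForConfigOrDie(cfg)

        factory := informers.NewSharedInformerFactoryWithOptions(cs, 10*time.Minute,
            informers.WithNamespace("cert-manager"))
        secrets := factory.Core().V1().Secrets().Informer()

        stop := make(chan struct{})
        defer close(stop)
        factory.Start(stop)
        // Block until the local cache mirrors the API server, which is what
        // the "Caches populated for *v1.Secret" lines above indicate.
        if !cache.WaitForCacheSync(stop, secrets.HasSynced) {
            panic("cache never synced")
        }
        fmt.Println("secret cache synced")
    }
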
Jan 29 06:50:28 crc kubenswrapper[4861]: I0129 06:50:28.761109 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-r8stv"]
Jan 29 06:50:29 crc kubenswrapper[4861]: I0129 06:50:29.021022 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-6888856db4-r8stv" event={"ID":"480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf","Type":"ContainerStarted","Data":"b2c58c8a4f0d96006ae486167876f5cc5ba8b890d1a40c2e1087a49e5432a05e"}
Jan 29 06:50:30 crc kubenswrapper[4861]: I0129 06:50:30.629979 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 06:50:30 crc kubenswrapper[4861]: I0129 06:50:30.630042 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 06:50:33 crc kubenswrapper[4861]: I0129 06:50:33.552202 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-hzfm6"]
Jan 29 06:50:33 crc kubenswrapper[4861]: I0129 06:50:33.553806 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6"
Jan 29 06:50:33 crc kubenswrapper[4861]: I0129 06:50:33.556106 4861 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-sv8nf"
Jan 29 06:50:33 crc kubenswrapper[4861]: I0129 06:50:33.569652 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-hzfm6"]
Jan 29 06:50:33 crc kubenswrapper[4861]: I0129 06:50:33.642715 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvrvq\" (UniqueName: \"kubernetes.io/projected/2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa-kube-api-access-mvrvq\") pod \"cert-manager-cainjector-5545bd876-hzfm6\" (UID: \"2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa\") " pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6"
Jan 29 06:50:33 crc kubenswrapper[4861]: I0129 06:50:33.642840 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-hzfm6\" (UID: \"2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa\") " pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6"
Jan 29 06:50:33 crc kubenswrapper[4861]: I0129 06:50:33.744185 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-hzfm6\" (UID: \"2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa\") " pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6"
Jan 29 06:50:33 crc kubenswrapper[4861]: I0129 06:50:33.744313 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvrvq\" (UniqueName: \"kubernetes.io/projected/2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa-kube-api-access-mvrvq\") pod \"cert-manager-cainjector-5545bd876-hzfm6\" (UID: \"2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa\") " pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6"
\"kubernetes.io/projected/2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa-kube-api-access-mvrvq\") pod \"cert-manager-cainjector-5545bd876-hzfm6\" (UID: \"2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa\") " pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6" Jan 29 06:50:33 crc kubenswrapper[4861]: I0129 06:50:33.771127 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-hzfm6\" (UID: \"2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa\") " pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6" Jan 29 06:50:33 crc kubenswrapper[4861]: I0129 06:50:33.772011 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvrvq\" (UniqueName: \"kubernetes.io/projected/2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa-kube-api-access-mvrvq\") pod \"cert-manager-cainjector-5545bd876-hzfm6\" (UID: \"2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa\") " pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6" Jan 29 06:50:33 crc kubenswrapper[4861]: I0129 06:50:33.870183 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6" Jan 29 06:50:34 crc kubenswrapper[4861]: I0129 06:50:34.054051 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-6888856db4-r8stv" event={"ID":"480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf","Type":"ContainerStarted","Data":"4170cced514fd75c68b7910a43ccca3b013d0eab0d6a4f16bce2f15004c5e626"} Jan 29 06:50:34 crc kubenswrapper[4861]: I0129 06:50:34.054577 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-6888856db4-r8stv" Jan 29 06:50:34 crc kubenswrapper[4861]: I0129 06:50:34.074751 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-6888856db4-r8stv" podStartSLOduration=2.479398468 podStartE2EDuration="7.074730406s" podCreationTimestamp="2026-01-29 06:50:27 +0000 UTC" firstStartedPulling="2026-01-29 06:50:28.767915593 +0000 UTC m=+920.439410150" lastFinishedPulling="2026-01-29 06:50:33.363247511 +0000 UTC m=+925.034742088" observedRunningTime="2026-01-29 06:50:34.070957757 +0000 UTC m=+925.742452324" watchObservedRunningTime="2026-01-29 06:50:34.074730406 +0000 UTC m=+925.746224963" Jan 29 06:50:34 crc kubenswrapper[4861]: I0129 06:50:34.136747 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-hzfm6"] Jan 29 06:50:35 crc kubenswrapper[4861]: I0129 06:50:35.063178 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6" event={"ID":"2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa","Type":"ContainerStarted","Data":"f13515bee5b06904ef39380ad624a964e31d9bb71b1c548d38d36acf9bce1b45"} Jan 29 06:50:35 crc kubenswrapper[4861]: I0129 06:50:35.063565 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6" event={"ID":"2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa","Type":"ContainerStarted","Data":"a5866ba7aa2779bc50bad6c9b83e96fa433766e0bc91a2f6e83314435f325523"} Jan 29 06:50:35 crc kubenswrapper[4861]: I0129 06:50:35.082243 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-5545bd876-hzfm6" podStartSLOduration=2.082228378 podStartE2EDuration="2.082228378s" podCreationTimestamp="2026-01-29 
06:50:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:50:35.0803752 +0000 UTC m=+926.751869757" watchObservedRunningTime="2026-01-29 06:50:35.082228378 +0000 UTC m=+926.753722935" Jan 29 06:50:38 crc kubenswrapper[4861]: I0129 06:50:38.321014 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-6888856db4-r8stv" Jan 29 06:50:57 crc kubenswrapper[4861]: I0129 06:50:57.182044 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-545d4d4674-9l5lz"] Jan 29 06:50:57 crc kubenswrapper[4861]: I0129 06:50:57.194115 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-545d4d4674-9l5lz" Jan 29 06:50:57 crc kubenswrapper[4861]: I0129 06:50:57.196694 4861 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-qg79b" Jan 29 06:50:57 crc kubenswrapper[4861]: I0129 06:50:57.224817 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-545d4d4674-9l5lz"] Jan 29 06:50:57 crc kubenswrapper[4861]: I0129 06:50:57.331152 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aa8044ac-0d9d-4431-b0b2-cbd3016db3d0-bound-sa-token\") pod \"cert-manager-545d4d4674-9l5lz\" (UID: \"aa8044ac-0d9d-4431-b0b2-cbd3016db3d0\") " pod="cert-manager/cert-manager-545d4d4674-9l5lz" Jan 29 06:50:57 crc kubenswrapper[4861]: I0129 06:50:57.331480 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q7xp\" (UniqueName: \"kubernetes.io/projected/aa8044ac-0d9d-4431-b0b2-cbd3016db3d0-kube-api-access-4q7xp\") pod \"cert-manager-545d4d4674-9l5lz\" (UID: \"aa8044ac-0d9d-4431-b0b2-cbd3016db3d0\") " pod="cert-manager/cert-manager-545d4d4674-9l5lz" Jan 29 06:50:57 crc kubenswrapper[4861]: I0129 06:50:57.434021 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q7xp\" (UniqueName: \"kubernetes.io/projected/aa8044ac-0d9d-4431-b0b2-cbd3016db3d0-kube-api-access-4q7xp\") pod \"cert-manager-545d4d4674-9l5lz\" (UID: \"aa8044ac-0d9d-4431-b0b2-cbd3016db3d0\") " pod="cert-manager/cert-manager-545d4d4674-9l5lz" Jan 29 06:50:57 crc kubenswrapper[4861]: I0129 06:50:57.434209 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aa8044ac-0d9d-4431-b0b2-cbd3016db3d0-bound-sa-token\") pod \"cert-manager-545d4d4674-9l5lz\" (UID: \"aa8044ac-0d9d-4431-b0b2-cbd3016db3d0\") " pod="cert-manager/cert-manager-545d4d4674-9l5lz" Jan 29 06:50:57 crc kubenswrapper[4861]: I0129 06:50:57.471703 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aa8044ac-0d9d-4431-b0b2-cbd3016db3d0-bound-sa-token\") pod \"cert-manager-545d4d4674-9l5lz\" (UID: \"aa8044ac-0d9d-4431-b0b2-cbd3016db3d0\") " pod="cert-manager/cert-manager-545d4d4674-9l5lz" Jan 29 06:50:57 crc kubenswrapper[4861]: I0129 06:50:57.479476 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q7xp\" (UniqueName: \"kubernetes.io/projected/aa8044ac-0d9d-4431-b0b2-cbd3016db3d0-kube-api-access-4q7xp\") pod \"cert-manager-545d4d4674-9l5lz\" (UID: \"aa8044ac-0d9d-4431-b0b2-cbd3016db3d0\") " 
pod="cert-manager/cert-manager-545d4d4674-9l5lz" Jan 29 06:50:57 crc kubenswrapper[4861]: I0129 06:50:57.544573 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-545d4d4674-9l5lz" Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.060346 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-545d4d4674-9l5lz"] Jan 29 06:50:58 crc kubenswrapper[4861]: W0129 06:50:58.071833 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa8044ac_0d9d_4431_b0b2_cbd3016db3d0.slice/crio-074df3601ccee5c78e6ec06df9edc435f1d421cfae31974afe4c35e07e972c9e WatchSource:0}: Error finding container 074df3601ccee5c78e6ec06df9edc435f1d421cfae31974afe4c35e07e972c9e: Status 404 returned error can't find the container with id 074df3601ccee5c78e6ec06df9edc435f1d421cfae31974afe4c35e07e972c9e Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.251170 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-545d4d4674-9l5lz" event={"ID":"aa8044ac-0d9d-4431-b0b2-cbd3016db3d0","Type":"ContainerStarted","Data":"7eefbb32642f300618c645ccb76df0d3aa1f323e591f33655673a58ba50fe630"} Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.251260 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-545d4d4674-9l5lz" event={"ID":"aa8044ac-0d9d-4431-b0b2-cbd3016db3d0","Type":"ContainerStarted","Data":"074df3601ccee5c78e6ec06df9edc435f1d421cfae31974afe4c35e07e972c9e"} Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.261712 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-f57lb"] Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.263751 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.282581 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f57lb"]
Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.298942 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-545d4d4674-9l5lz" podStartSLOduration=1.2989230680000001 podStartE2EDuration="1.298923068s" podCreationTimestamp="2026-01-29 06:50:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:50:58.293980489 +0000 UTC m=+949.965475076" watchObservedRunningTime="2026-01-29 06:50:58.298923068 +0000 UTC m=+949.970417645"
Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.448445 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4r54\" (UniqueName: \"kubernetes.io/projected/b16925c5-1b02-4c8b-b915-82c4caac055f-kube-api-access-v4r54\") pod \"certified-operators-f57lb\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " pod="openshift-marketplace/certified-operators-f57lb"
Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.448493 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-utilities\") pod \"certified-operators-f57lb\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " pod="openshift-marketplace/certified-operators-f57lb"
Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.448539 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-catalog-content\") pod \"certified-operators-f57lb\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " pod="openshift-marketplace/certified-operators-f57lb"
Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.550078 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4r54\" (UniqueName: \"kubernetes.io/projected/b16925c5-1b02-4c8b-b915-82c4caac055f-kube-api-access-v4r54\") pod \"certified-operators-f57lb\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " pod="openshift-marketplace/certified-operators-f57lb"
Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.550147 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-utilities\") pod \"certified-operators-f57lb\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " pod="openshift-marketplace/certified-operators-f57lb"
Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.550191 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-catalog-content\") pod \"certified-operators-f57lb\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " pod="openshift-marketplace/certified-operators-f57lb"
Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.550714 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-catalog-content\") pod \"certified-operators-f57lb\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " pod="openshift-marketplace/certified-operators-f57lb"
(UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " pod="openshift-marketplace/certified-operators-f57lb" Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.551281 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-utilities\") pod \"certified-operators-f57lb\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " pod="openshift-marketplace/certified-operators-f57lb" Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.571136 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4r54\" (UniqueName: \"kubernetes.io/projected/b16925c5-1b02-4c8b-b915-82c4caac055f-kube-api-access-v4r54\") pod \"certified-operators-f57lb\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " pod="openshift-marketplace/certified-operators-f57lb" Jan 29 06:50:58 crc kubenswrapper[4861]: I0129 06:50:58.586015 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f57lb" Jan 29 06:50:59 crc kubenswrapper[4861]: I0129 06:50:59.038679 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f57lb"] Jan 29 06:50:59 crc kubenswrapper[4861]: W0129 06:50:59.054240 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb16925c5_1b02_4c8b_b915_82c4caac055f.slice/crio-9fa9712e5bafc76090d88a9207d77ed9a20a1fc48a2788c22dd9cee0f4ed75d2 WatchSource:0}: Error finding container 9fa9712e5bafc76090d88a9207d77ed9a20a1fc48a2788c22dd9cee0f4ed75d2: Status 404 returned error can't find the container with id 9fa9712e5bafc76090d88a9207d77ed9a20a1fc48a2788c22dd9cee0f4ed75d2 Jan 29 06:50:59 crc kubenswrapper[4861]: I0129 06:50:59.258154 4861 generic.go:334] "Generic (PLEG): container finished" podID="b16925c5-1b02-4c8b-b915-82c4caac055f" containerID="0a5c827ad45cf52195393ea80d0d0faaac423770f861bc26e42a364f89e593e3" exitCode=0 Jan 29 06:50:59 crc kubenswrapper[4861]: I0129 06:50:59.259233 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f57lb" event={"ID":"b16925c5-1b02-4c8b-b915-82c4caac055f","Type":"ContainerDied","Data":"0a5c827ad45cf52195393ea80d0d0faaac423770f861bc26e42a364f89e593e3"} Jan 29 06:50:59 crc kubenswrapper[4861]: I0129 06:50:59.259287 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f57lb" event={"ID":"b16925c5-1b02-4c8b-b915-82c4caac055f","Type":"ContainerStarted","Data":"9fa9712e5bafc76090d88a9207d77ed9a20a1fc48a2788c22dd9cee0f4ed75d2"} Jan 29 06:51:00 crc kubenswrapper[4861]: I0129 06:51:00.269278 4861 generic.go:334] "Generic (PLEG): container finished" podID="b16925c5-1b02-4c8b-b915-82c4caac055f" containerID="6cbc3822598360ec5d24afd316eb2993d2ad85f67a5693620cf1cedb8b7091cd" exitCode=0 Jan 29 06:51:00 crc kubenswrapper[4861]: I0129 06:51:00.269385 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f57lb" event={"ID":"b16925c5-1b02-4c8b-b915-82c4caac055f","Type":"ContainerDied","Data":"6cbc3822598360ec5d24afd316eb2993d2ad85f67a5693620cf1cedb8b7091cd"} Jan 29 06:51:00 crc kubenswrapper[4861]: I0129 06:51:00.629583 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial 
Jan 29 06:51:00 crc kubenswrapper[4861]: I0129 06:51:00.629583 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 06:51:00 crc kubenswrapper[4861]: I0129 06:51:00.629649 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 06:51:00 crc kubenswrapper[4861]: I0129 06:51:00.629692 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 06:51:00 crc kubenswrapper[4861]: I0129 06:51:00.630272 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a3e571fb457ea966d33ae87dfd58f64d47243c7a436da1c6aa743ed114c9efd5"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 06:51:00 crc kubenswrapper[4861]: I0129 06:51:00.630329 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://a3e571fb457ea966d33ae87dfd58f64d47243c7a436da1c6aa743ed114c9efd5" gracePeriod=600
Jan 29 06:51:01 crc kubenswrapper[4861]: I0129 06:51:01.278682 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f57lb" event={"ID":"b16925c5-1b02-4c8b-b915-82c4caac055f","Type":"ContainerStarted","Data":"4a5be8ee7bb95ae5c148bfee8d4b4437112e6fe59f3350892081bf53282c4262"}
Jan 29 06:51:01 crc kubenswrapper[4861]: I0129 06:51:01.281490 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="a3e571fb457ea966d33ae87dfd58f64d47243c7a436da1c6aa743ed114c9efd5" exitCode=0
Jan 29 06:51:01 crc kubenswrapper[4861]: I0129 06:51:01.281532 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"a3e571fb457ea966d33ae87dfd58f64d47243c7a436da1c6aa743ed114c9efd5"}
Jan 29 06:51:01 crc kubenswrapper[4861]: I0129 06:51:01.281558 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"6286a50c0abd3320b1618a8f91d0446eb73b0dae9310f72e53e305c4914d0508"}
Jan 29 06:51:01 crc kubenswrapper[4861]: I0129 06:51:01.281575 4861 scope.go:117] "RemoveContainer" containerID="7f4deb5daa5740edb0e7467d9109be2012b4d8eeb7a5057275a40485d2be7713"
Jan 29 06:51:01 crc kubenswrapper[4861]: I0129 06:51:01.312825 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-f57lb" podStartSLOduration=1.725228894 podStartE2EDuration="3.3128058s" podCreationTimestamp="2026-01-29 06:50:58 +0000 UTC" firstStartedPulling="2026-01-29 06:50:59.260573606 +0000 UTC m=+950.932068163" lastFinishedPulling="2026-01-29 06:51:00.848150512 +0000 UTC m=+952.519645069" observedRunningTime="2026-01-29 06:51:01.309312669 +0000 UTC m=+952.980807246" watchObservedRunningTime="2026-01-29 06:51:01.3128058 +0000 UTC m=+952.984300367"
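[editor's note] The machine-config-daemon entries above are the complete probe-driven restart path: patch_prober records the failing HTTP GET, prober.go marks the liveness probe failed, the sync loop flags the pod unhealthy, and the runtime manager kills the container so it can be restarted. A sketch of a liveness probe that would produce that GET, written against k8s.io/api; the host, port, and path come from the logged URL, while the period and failure threshold are assumptions, not values read from the machine-config-daemon manifest. The gracePeriod=600 on the kill presumably reflects the pod's termination grace period rather than anything in the probe itself.

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    // Illustrative liveness probe matching the URL in the log
    // ("http://127.0.0.1:8798/health"); the threshold values are assumed.
    func mcdLivenessProbe() *corev1.Probe {
    	return &corev1.Probe{
    		ProbeHandler: corev1.ProbeHandler{
    			HTTPGet: &corev1.HTTPGetAction{
    				Host: "127.0.0.1",
    				Path: "/health",
    				Port: intstr.FromInt(8798),
    			},
    		},
    		PeriodSeconds:    10, // assumed
    		FailureThreshold: 3,  // assumed: restart after 3 consecutive failures
    	}
    }

    func main() {
    	p := mcdLivenessProbe()
    	fmt.Printf("GET http://%s:%d%s every %ds\n",
    		p.HTTPGet.Host, p.HTTPGet.Port.IntValue(), p.HTTPGet.Path, p.PeriodSeconds)
    }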
Jan 29 06:51:05 crc kubenswrapper[4861]: I0129 06:51:05.065335 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-d6xnb"]
Jan 29 06:51:05 crc kubenswrapper[4861]: I0129 06:51:05.067194 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-d6xnb"
Jan 29 06:51:05 crc kubenswrapper[4861]: I0129 06:51:05.071439 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-k85t6"
Jan 29 06:51:05 crc kubenswrapper[4861]: I0129 06:51:05.071808 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Jan 29 06:51:05 crc kubenswrapper[4861]: I0129 06:51:05.071955 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Jan 29 06:51:05 crc kubenswrapper[4861]: I0129 06:51:05.072296 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-d6xnb"]
Jan 29 06:51:05 crc kubenswrapper[4861]: I0129 06:51:05.156122 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r74k\" (UniqueName: \"kubernetes.io/projected/07013be0-e734-4a8e-a285-55b0901f7e3b-kube-api-access-5r74k\") pod \"openstack-operator-index-d6xnb\" (UID: \"07013be0-e734-4a8e-a285-55b0901f7e3b\") " pod="openstack-operators/openstack-operator-index-d6xnb"
Jan 29 06:51:05 crc kubenswrapper[4861]: I0129 06:51:05.259314 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r74k\" (UniqueName: \"kubernetes.io/projected/07013be0-e734-4a8e-a285-55b0901f7e3b-kube-api-access-5r74k\") pod \"openstack-operator-index-d6xnb\" (UID: \"07013be0-e734-4a8e-a285-55b0901f7e3b\") " pod="openstack-operators/openstack-operator-index-d6xnb"
Jan 29 06:51:05 crc kubenswrapper[4861]: I0129 06:51:05.301144 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r74k\" (UniqueName: \"kubernetes.io/projected/07013be0-e734-4a8e-a285-55b0901f7e3b-kube-api-access-5r74k\") pod \"openstack-operator-index-d6xnb\" (UID: \"07013be0-e734-4a8e-a285-55b0901f7e3b\") " pod="openstack-operators/openstack-operator-index-d6xnb"
Jan 29 06:51:05 crc kubenswrapper[4861]: I0129 06:51:05.403784 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-d6xnb"
Jan 29 06:51:05 crc kubenswrapper[4861]: I0129 06:51:05.957459 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-d6xnb"]
Jan 29 06:51:06 crc kubenswrapper[4861]: I0129 06:51:06.326852 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d6xnb" event={"ID":"07013be0-e734-4a8e-a285-55b0901f7e3b","Type":"ContainerStarted","Data":"09f0a1b779228bc6e60553e5429b51a572161f1803480a71fce4a577dfc0da57"}
Jan 29 06:51:07 crc kubenswrapper[4861]: I0129 06:51:07.338464 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d6xnb" event={"ID":"07013be0-e734-4a8e-a285-55b0901f7e3b","Type":"ContainerStarted","Data":"d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1"}
Jan 29 06:51:07 crc kubenswrapper[4861]: I0129 06:51:07.358966 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-d6xnb" podStartSLOduration=1.6294297370000002 podStartE2EDuration="2.358951142s" podCreationTimestamp="2026-01-29 06:51:05 +0000 UTC" firstStartedPulling="2026-01-29 06:51:05.964356231 +0000 UTC m=+957.635850788" lastFinishedPulling="2026-01-29 06:51:06.693877626 +0000 UTC m=+958.365372193" observedRunningTime="2026-01-29 06:51:07.355543393 +0000 UTC m=+959.027037960" watchObservedRunningTime="2026-01-29 06:51:07.358951142 +0000 UTC m=+959.030445699"
Jan 29 06:51:08 crc kubenswrapper[4861]: I0129 06:51:08.587574 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-f57lb"
Jan 29 06:51:08 crc kubenswrapper[4861]: I0129 06:51:08.587649 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-f57lb"
Jan 29 06:51:08 crc kubenswrapper[4861]: I0129 06:51:08.699751 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-f57lb"
Jan 29 06:51:08 crc kubenswrapper[4861]: I0129 06:51:08.828986 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-d6xnb"]
Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.351703 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-d6xnb" podUID="07013be0-e734-4a8e-a285-55b0901f7e3b" containerName="registry-server" containerID="cri-o://d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1" gracePeriod=2
Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.433291 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-f57lb"
Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.449008 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-mb56g"]
Need to start a new one" pod="openstack-operators/openstack-operator-index-mb56g" Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.479323 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-mb56g"] Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.523570 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbdtx\" (UniqueName: \"kubernetes.io/projected/2929ade7-fa9f-46a1-9810-0263ea016347-kube-api-access-sbdtx\") pod \"openstack-operator-index-mb56g\" (UID: \"2929ade7-fa9f-46a1-9810-0263ea016347\") " pod="openstack-operators/openstack-operator-index-mb56g" Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.624627 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbdtx\" (UniqueName: \"kubernetes.io/projected/2929ade7-fa9f-46a1-9810-0263ea016347-kube-api-access-sbdtx\") pod \"openstack-operator-index-mb56g\" (UID: \"2929ade7-fa9f-46a1-9810-0263ea016347\") " pod="openstack-operators/openstack-operator-index-mb56g" Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.659776 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbdtx\" (UniqueName: \"kubernetes.io/projected/2929ade7-fa9f-46a1-9810-0263ea016347-kube-api-access-sbdtx\") pod \"openstack-operator-index-mb56g\" (UID: \"2929ade7-fa9f-46a1-9810-0263ea016347\") " pod="openstack-operators/openstack-operator-index-mb56g" Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.777616 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-d6xnb" Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.809990 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-mb56g" Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.828339 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5r74k\" (UniqueName: \"kubernetes.io/projected/07013be0-e734-4a8e-a285-55b0901f7e3b-kube-api-access-5r74k\") pod \"07013be0-e734-4a8e-a285-55b0901f7e3b\" (UID: \"07013be0-e734-4a8e-a285-55b0901f7e3b\") " Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.832484 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07013be0-e734-4a8e-a285-55b0901f7e3b-kube-api-access-5r74k" (OuterVolumeSpecName: "kube-api-access-5r74k") pod "07013be0-e734-4a8e-a285-55b0901f7e3b" (UID: "07013be0-e734-4a8e-a285-55b0901f7e3b"). InnerVolumeSpecName "kube-api-access-5r74k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:51:09 crc kubenswrapper[4861]: I0129 06:51:09.930181 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5r74k\" (UniqueName: \"kubernetes.io/projected/07013be0-e734-4a8e-a285-55b0901f7e3b-kube-api-access-5r74k\") on node \"crc\" DevicePath \"\"" Jan 29 06:51:10 crc kubenswrapper[4861]: I0129 06:51:10.033813 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-mb56g"] Jan 29 06:51:10 crc kubenswrapper[4861]: W0129 06:51:10.044271 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2929ade7_fa9f_46a1_9810_0263ea016347.slice/crio-1076d4b60aecba227179e0c527ce0acf7f219534ed16689704f69c4f964ccef3 WatchSource:0}: Error finding container 1076d4b60aecba227179e0c527ce0acf7f219534ed16689704f69c4f964ccef3: Status 404 returned error can't find the container with id 1076d4b60aecba227179e0c527ce0acf7f219534ed16689704f69c4f964ccef3 Jan 29 06:51:10 crc kubenswrapper[4861]: I0129 06:51:10.360782 4861 generic.go:334] "Generic (PLEG): container finished" podID="07013be0-e734-4a8e-a285-55b0901f7e3b" containerID="d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1" exitCode=0 Jan 29 06:51:10 crc kubenswrapper[4861]: I0129 06:51:10.360868 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d6xnb" event={"ID":"07013be0-e734-4a8e-a285-55b0901f7e3b","Type":"ContainerDied","Data":"d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1"} Jan 29 06:51:10 crc kubenswrapper[4861]: I0129 06:51:10.360943 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d6xnb" event={"ID":"07013be0-e734-4a8e-a285-55b0901f7e3b","Type":"ContainerDied","Data":"09f0a1b779228bc6e60553e5429b51a572161f1803480a71fce4a577dfc0da57"} Jan 29 06:51:10 crc kubenswrapper[4861]: I0129 06:51:10.360974 4861 scope.go:117] "RemoveContainer" containerID="d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1" Jan 29 06:51:10 crc kubenswrapper[4861]: I0129 06:51:10.360889 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-d6xnb" Jan 29 06:51:10 crc kubenswrapper[4861]: I0129 06:51:10.364374 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-mb56g" event={"ID":"2929ade7-fa9f-46a1-9810-0263ea016347","Type":"ContainerStarted","Data":"1076d4b60aecba227179e0c527ce0acf7f219534ed16689704f69c4f964ccef3"} Jan 29 06:51:10 crc kubenswrapper[4861]: I0129 06:51:10.387123 4861 scope.go:117] "RemoveContainer" containerID="d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1" Jan 29 06:51:10 crc kubenswrapper[4861]: E0129 06:51:10.387624 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1\": container with ID starting with d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1 not found: ID does not exist" containerID="d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1" Jan 29 06:51:10 crc kubenswrapper[4861]: I0129 06:51:10.387683 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1"} err="failed to get container status \"d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1\": rpc error: code = NotFound desc = could not find container \"d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1\": container with ID starting with d054f1395bd2ec9f0f64355859da53172d33c9ad14c21121db269ccc7e4b3fd1 not found: ID does not exist" Jan 29 06:51:10 crc kubenswrapper[4861]: I0129 06:51:10.401740 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-d6xnb"] Jan 29 06:51:10 crc kubenswrapper[4861]: I0129 06:51:10.409356 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-d6xnb"] Jan 29 06:51:11 crc kubenswrapper[4861]: I0129 06:51:11.140757 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07013be0-e734-4a8e-a285-55b0901f7e3b" path="/var/lib/kubelet/pods/07013be0-e734-4a8e-a285-55b0901f7e3b/volumes" Jan 29 06:51:11 crc kubenswrapper[4861]: I0129 06:51:11.375018 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-mb56g" event={"ID":"2929ade7-fa9f-46a1-9810-0263ea016347","Type":"ContainerStarted","Data":"8605c75fef32a93a7f1340f847e8168a745bf1218a03bec20cac0be44c73abc1"} Jan 29 06:51:11 crc kubenswrapper[4861]: I0129 06:51:11.402562 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-mb56g" podStartSLOduration=1.9895939440000001 podStartE2EDuration="2.402537186s" podCreationTimestamp="2026-01-29 06:51:09 +0000 UTC" firstStartedPulling="2026-01-29 06:51:10.053167302 +0000 UTC m=+961.724661859" lastFinishedPulling="2026-01-29 06:51:10.466110534 +0000 UTC m=+962.137605101" observedRunningTime="2026-01-29 06:51:11.393965093 +0000 UTC m=+963.065459710" watchObservedRunningTime="2026-01-29 06:51:11.402537186 +0000 UTC m=+963.074031783" Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.233018 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f57lb"] Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.233755 4861 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-f57lb" podUID="b16925c5-1b02-4c8b-b915-82c4caac055f" containerName="registry-server" containerID="cri-o://4a5be8ee7bb95ae5c148bfee8d4b4437112e6fe59f3350892081bf53282c4262" gracePeriod=2 Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.397276 4861 generic.go:334] "Generic (PLEG): container finished" podID="b16925c5-1b02-4c8b-b915-82c4caac055f" containerID="4a5be8ee7bb95ae5c148bfee8d4b4437112e6fe59f3350892081bf53282c4262" exitCode=0 Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.397353 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f57lb" event={"ID":"b16925c5-1b02-4c8b-b915-82c4caac055f","Type":"ContainerDied","Data":"4a5be8ee7bb95ae5c148bfee8d4b4437112e6fe59f3350892081bf53282c4262"} Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.655832 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f57lb" Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.760633 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4r54\" (UniqueName: \"kubernetes.io/projected/b16925c5-1b02-4c8b-b915-82c4caac055f-kube-api-access-v4r54\") pod \"b16925c5-1b02-4c8b-b915-82c4caac055f\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.760856 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-utilities\") pod \"b16925c5-1b02-4c8b-b915-82c4caac055f\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.760895 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-catalog-content\") pod \"b16925c5-1b02-4c8b-b915-82c4caac055f\" (UID: \"b16925c5-1b02-4c8b-b915-82c4caac055f\") " Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.762034 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-utilities" (OuterVolumeSpecName: "utilities") pod "b16925c5-1b02-4c8b-b915-82c4caac055f" (UID: "b16925c5-1b02-4c8b-b915-82c4caac055f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.766042 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b16925c5-1b02-4c8b-b915-82c4caac055f-kube-api-access-v4r54" (OuterVolumeSpecName: "kube-api-access-v4r54") pod "b16925c5-1b02-4c8b-b915-82c4caac055f" (UID: "b16925c5-1b02-4c8b-b915-82c4caac055f"). InnerVolumeSpecName "kube-api-access-v4r54". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.830732 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b16925c5-1b02-4c8b-b915-82c4caac055f" (UID: "b16925c5-1b02-4c8b-b915-82c4caac055f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.862503 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4r54\" (UniqueName: \"kubernetes.io/projected/b16925c5-1b02-4c8b-b915-82c4caac055f-kube-api-access-v4r54\") on node \"crc\" DevicePath \"\"" Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.862539 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:51:12 crc kubenswrapper[4861]: I0129 06:51:12.862551 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16925c5-1b02-4c8b-b915-82c4caac055f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:51:13 crc kubenswrapper[4861]: I0129 06:51:13.408271 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f57lb" event={"ID":"b16925c5-1b02-4c8b-b915-82c4caac055f","Type":"ContainerDied","Data":"9fa9712e5bafc76090d88a9207d77ed9a20a1fc48a2788c22dd9cee0f4ed75d2"} Jan 29 06:51:13 crc kubenswrapper[4861]: I0129 06:51:13.408329 4861 scope.go:117] "RemoveContainer" containerID="4a5be8ee7bb95ae5c148bfee8d4b4437112e6fe59f3350892081bf53282c4262" Jan 29 06:51:13 crc kubenswrapper[4861]: I0129 06:51:13.408376 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f57lb" Jan 29 06:51:13 crc kubenswrapper[4861]: I0129 06:51:13.427902 4861 scope.go:117] "RemoveContainer" containerID="6cbc3822598360ec5d24afd316eb2993d2ad85f67a5693620cf1cedb8b7091cd" Jan 29 06:51:13 crc kubenswrapper[4861]: I0129 06:51:13.436285 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f57lb"] Jan 29 06:51:13 crc kubenswrapper[4861]: I0129 06:51:13.441260 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-f57lb"] Jan 29 06:51:13 crc kubenswrapper[4861]: I0129 06:51:13.464779 4861 scope.go:117] "RemoveContainer" containerID="0a5c827ad45cf52195393ea80d0d0faaac423770f861bc26e42a364f89e593e3" Jan 29 06:51:15 crc kubenswrapper[4861]: I0129 06:51:15.125570 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b16925c5-1b02-4c8b-b915-82c4caac055f" path="/var/lib/kubelet/pods/b16925c5-1b02-4c8b-b915-82c4caac055f/volumes" Jan 29 06:51:19 crc kubenswrapper[4861]: I0129 06:51:19.810814 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-mb56g" Jan 29 06:51:19 crc kubenswrapper[4861]: I0129 06:51:19.811429 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-mb56g" Jan 29 06:51:19 crc kubenswrapper[4861]: I0129 06:51:19.854834 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-mb56g" Jan 29 06:51:20 crc kubenswrapper[4861]: I0129 06:51:20.478664 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-mb56g" Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.476645 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"] Jan 29 06:51:22 crc kubenswrapper[4861]: E0129 
Jan 29 06:51:22 crc kubenswrapper[4861]: E0129 06:51:22.476972 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07013be0-e734-4a8e-a285-55b0901f7e3b" containerName="registry-server"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.476993 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="07013be0-e734-4a8e-a285-55b0901f7e3b" containerName="registry-server"
Jan 29 06:51:22 crc kubenswrapper[4861]: E0129 06:51:22.477012 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b16925c5-1b02-4c8b-b915-82c4caac055f" containerName="extract-content"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.477023 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b16925c5-1b02-4c8b-b915-82c4caac055f" containerName="extract-content"
Jan 29 06:51:22 crc kubenswrapper[4861]: E0129 06:51:22.477052 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b16925c5-1b02-4c8b-b915-82c4caac055f" containerName="extract-utilities"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.477063 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b16925c5-1b02-4c8b-b915-82c4caac055f" containerName="extract-utilities"
Jan 29 06:51:22 crc kubenswrapper[4861]: E0129 06:51:22.477103 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b16925c5-1b02-4c8b-b915-82c4caac055f" containerName="registry-server"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.477116 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b16925c5-1b02-4c8b-b915-82c4caac055f" containerName="registry-server"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.477302 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="07013be0-e734-4a8e-a285-55b0901f7e3b" containerName="registry-server"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.477326 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b16925c5-1b02-4c8b-b915-82c4caac055f" containerName="registry-server"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.478611 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.481093 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-8hh84"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.499603 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"]
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.586530 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-bundle\") pod \"da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") " pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.586625 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-util\") pod \"da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") " pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.586679 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxvx4\" (UniqueName: \"kubernetes.io/projected/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-kube-api-access-sxvx4\") pod \"da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") " pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.688292 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxvx4\" (UniqueName: \"kubernetes.io/projected/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-kube-api-access-sxvx4\") pod \"da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") " pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.688369 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-bundle\") pod \"da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") " pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.688407 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-util\") pod \"da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") " pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.688799 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-util\") pod \"da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") " pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.689257 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-bundle\") pod \"da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") " pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.717652 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxvx4\" (UniqueName: \"kubernetes.io/projected/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-kube-api-access-sxvx4\") pod \"da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") " pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:22 crc kubenswrapper[4861]: I0129 06:51:22.794757 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:23 crc kubenswrapper[4861]: I0129 06:51:23.051528 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"]
Jan 29 06:51:23 crc kubenswrapper[4861]: I0129 06:51:23.481976 4861 generic.go:334] "Generic (PLEG): container finished" podID="ba8147a9-0ccc-4cc8-974b-b6eaa899a226" containerID="ba20882f6395b9ad1e9c37b1959b5b8096f7a1c213a4e27130b3a702d7d06a04" exitCode=0
Jan 29 06:51:23 crc kubenswrapper[4861]: I0129 06:51:23.482037 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8" event={"ID":"ba8147a9-0ccc-4cc8-974b-b6eaa899a226","Type":"ContainerDied","Data":"ba20882f6395b9ad1e9c37b1959b5b8096f7a1c213a4e27130b3a702d7d06a04"}
Jan 29 06:51:23 crc kubenswrapper[4861]: I0129 06:51:23.483039 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8" event={"ID":"ba8147a9-0ccc-4cc8-974b-b6eaa899a226","Type":"ContainerStarted","Data":"a1ae7693bb18e6b41fd8e3417c1bd2727fd47f82757c55fc43dc768f1fe28891"}
Jan 29 06:51:24 crc kubenswrapper[4861]: I0129 06:51:24.491410 4861 generic.go:334] "Generic (PLEG): container finished" podID="ba8147a9-0ccc-4cc8-974b-b6eaa899a226" containerID="0347a5ca2d8bd3c4efa11d9da8df7081c17298225e6b527506196ba6218a1747" exitCode=0
Jan 29 06:51:24 crc kubenswrapper[4861]: I0129 06:51:24.491789 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8" event={"ID":"ba8147a9-0ccc-4cc8-974b-b6eaa899a226","Type":"ContainerDied","Data":"0347a5ca2d8bd3c4efa11d9da8df7081c17298225e6b527506196ba6218a1747"}
Jan 29 06:51:25 crc kubenswrapper[4861]: I0129 06:51:25.499504 4861 generic.go:334] "Generic (PLEG): container finished" podID="ba8147a9-0ccc-4cc8-974b-b6eaa899a226" containerID="eeee531071465ba248762c9397cc81dc334a0b6b9579d73af43019b908f05715" exitCode=0
Jan 29 06:51:25 crc kubenswrapper[4861]: I0129 06:51:25.499558 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8" event={"ID":"ba8147a9-0ccc-4cc8-974b-b6eaa899a226","Type":"ContainerDied","Data":"eeee531071465ba248762c9397cc81dc334a0b6b9579d73af43019b908f05715"}
Jan 29 06:51:26 crc kubenswrapper[4861]: I0129 06:51:26.746437 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:26 crc kubenswrapper[4861]: I0129 06:51:26.943769 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-bundle\") pod \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") "
Jan 29 06:51:26 crc kubenswrapper[4861]: I0129 06:51:26.943844 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxvx4\" (UniqueName: \"kubernetes.io/projected/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-kube-api-access-sxvx4\") pod \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") "
Jan 29 06:51:26 crc kubenswrapper[4861]: I0129 06:51:26.943944 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-util\") pod \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\" (UID: \"ba8147a9-0ccc-4cc8-974b-b6eaa899a226\") "
Jan 29 06:51:26 crc kubenswrapper[4861]: I0129 06:51:26.944567 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-bundle" (OuterVolumeSpecName: "bundle") pod "ba8147a9-0ccc-4cc8-974b-b6eaa899a226" (UID: "ba8147a9-0ccc-4cc8-974b-b6eaa899a226"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:51:26 crc kubenswrapper[4861]: I0129 06:51:26.953356 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-kube-api-access-sxvx4" (OuterVolumeSpecName: "kube-api-access-sxvx4") pod "ba8147a9-0ccc-4cc8-974b-b6eaa899a226" (UID: "ba8147a9-0ccc-4cc8-974b-b6eaa899a226"). InnerVolumeSpecName "kube-api-access-sxvx4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:51:26 crc kubenswrapper[4861]: I0129 06:51:26.971329 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-util" (OuterVolumeSpecName: "util") pod "ba8147a9-0ccc-4cc8-974b-b6eaa899a226" (UID: "ba8147a9-0ccc-4cc8-974b-b6eaa899a226"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:51:27 crc kubenswrapper[4861]: I0129 06:51:27.046536 4861 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 06:51:27 crc kubenswrapper[4861]: I0129 06:51:27.046587 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxvx4\" (UniqueName: \"kubernetes.io/projected/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-kube-api-access-sxvx4\") on node \"crc\" DevicePath \"\""
Jan 29 06:51:27 crc kubenswrapper[4861]: I0129 06:51:27.046606 4861 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ba8147a9-0ccc-4cc8-974b-b6eaa899a226-util\") on node \"crc\" DevicePath \"\""
Jan 29 06:51:27 crc kubenswrapper[4861]: I0129 06:51:27.515553 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8" event={"ID":"ba8147a9-0ccc-4cc8-974b-b6eaa899a226","Type":"ContainerDied","Data":"a1ae7693bb18e6b41fd8e3417c1bd2727fd47f82757c55fc43dc768f1fe28891"}
Jan 29 06:51:27 crc kubenswrapper[4861]: I0129 06:51:27.515590 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1ae7693bb18e6b41fd8e3417c1bd2727fd47f82757c55fc43dc768f1fe28891"
Jan 29 06:51:27 crc kubenswrapper[4861]: I0129 06:51:27.515618 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8"
Jan 29 06:51:34 crc kubenswrapper[4861]: I0129 06:51:34.557176 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx"]
Jan 29 06:51:34 crc kubenswrapper[4861]: E0129 06:51:34.557988 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba8147a9-0ccc-4cc8-974b-b6eaa899a226" containerName="util"
Jan 29 06:51:34 crc kubenswrapper[4861]: I0129 06:51:34.558003 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba8147a9-0ccc-4cc8-974b-b6eaa899a226" containerName="util"
Jan 29 06:51:34 crc kubenswrapper[4861]: E0129 06:51:34.558026 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba8147a9-0ccc-4cc8-974b-b6eaa899a226" containerName="pull"
Jan 29 06:51:34 crc kubenswrapper[4861]: I0129 06:51:34.558035 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba8147a9-0ccc-4cc8-974b-b6eaa899a226" containerName="pull"
Jan 29 06:51:34 crc kubenswrapper[4861]: E0129 06:51:34.558053 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba8147a9-0ccc-4cc8-974b-b6eaa899a226" containerName="extract"
Jan 29 06:51:34 crc kubenswrapper[4861]: I0129 06:51:34.558063 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba8147a9-0ccc-4cc8-974b-b6eaa899a226" containerName="extract"
Jan 29 06:51:34 crc kubenswrapper[4861]: I0129 06:51:34.558248 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba8147a9-0ccc-4cc8-974b-b6eaa899a226" containerName="extract"
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx" Jan 29 06:51:34 crc kubenswrapper[4861]: I0129 06:51:34.560665 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-ncl97" Jan 29 06:51:34 crc kubenswrapper[4861]: I0129 06:51:34.587907 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx"] Jan 29 06:51:34 crc kubenswrapper[4861]: I0129 06:51:34.747760 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4l2p\" (UniqueName: \"kubernetes.io/projected/9ea19e58-6067-48c1-993c-b1d0cced8997-kube-api-access-f4l2p\") pod \"openstack-operator-controller-init-5c4cd4c8c8-hcfcx\" (UID: \"9ea19e58-6067-48c1-993c-b1d0cced8997\") " pod="openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx" Jan 29 06:51:34 crc kubenswrapper[4861]: I0129 06:51:34.849188 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4l2p\" (UniqueName: \"kubernetes.io/projected/9ea19e58-6067-48c1-993c-b1d0cced8997-kube-api-access-f4l2p\") pod \"openstack-operator-controller-init-5c4cd4c8c8-hcfcx\" (UID: \"9ea19e58-6067-48c1-993c-b1d0cced8997\") " pod="openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx" Jan 29 06:51:34 crc kubenswrapper[4861]: I0129 06:51:34.873177 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4l2p\" (UniqueName: \"kubernetes.io/projected/9ea19e58-6067-48c1-993c-b1d0cced8997-kube-api-access-f4l2p\") pod \"openstack-operator-controller-init-5c4cd4c8c8-hcfcx\" (UID: \"9ea19e58-6067-48c1-993c-b1d0cced8997\") " pod="openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx" Jan 29 06:51:34 crc kubenswrapper[4861]: I0129 06:51:34.878617 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx" Jan 29 06:51:35 crc kubenswrapper[4861]: I0129 06:51:35.096534 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx"] Jan 29 06:51:35 crc kubenswrapper[4861]: I0129 06:51:35.601991 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx" event={"ID":"9ea19e58-6067-48c1-993c-b1d0cced8997","Type":"ContainerStarted","Data":"2bf10fda17d2645a0ab323b60dc120f6df984e42c7f019178e05e8be189bfdb6"} Jan 29 06:51:41 crc kubenswrapper[4861]: I0129 06:51:41.645976 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx" event={"ID":"9ea19e58-6067-48c1-993c-b1d0cced8997","Type":"ContainerStarted","Data":"0c2dd74c33980841f8a2f53ab15c6e5964123a4781cc0e504b6649aaa95a9fbf"} Jan 29 06:51:41 crc kubenswrapper[4861]: I0129 06:51:41.646450 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx" Jan 29 06:51:41 crc kubenswrapper[4861]: I0129 06:51:41.686552 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx" podStartSLOduration=1.552938924 podStartE2EDuration="7.68652412s" podCreationTimestamp="2026-01-29 06:51:34 +0000 UTC" firstStartedPulling="2026-01-29 06:51:35.110574069 +0000 UTC m=+986.782068646" lastFinishedPulling="2026-01-29 06:51:41.244159255 +0000 UTC m=+992.915653842" observedRunningTime="2026-01-29 06:51:41.681955742 +0000 UTC m=+993.353450409" watchObservedRunningTime="2026-01-29 06:51:41.68652412 +0000 UTC m=+993.358018717" Jan 29 06:51:54 crc kubenswrapper[4861]: I0129 06:51:54.882708 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-5c4cd4c8c8-hcfcx" Jan 29 06:52:13 crc kubenswrapper[4861]: I0129 06:52:13.896313 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj"] Jan 29 06:52:13 crc kubenswrapper[4861]: I0129 06:52:13.897673 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj" Jan 29 06:52:13 crc kubenswrapper[4861]: I0129 06:52:13.901355 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-8gmbt" Jan 29 06:52:13 crc kubenswrapper[4861]: I0129 06:52:13.906570 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj"] Jan 29 06:52:13 crc kubenswrapper[4861]: I0129 06:52:13.911830 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6"] Jan 29 06:52:13 crc kubenswrapper[4861]: I0129 06:52:13.912563 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6" Jan 29 06:52:13 crc kubenswrapper[4861]: I0129 06:52:13.937873 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-d9txc" Jan 29 06:52:13 crc kubenswrapper[4861]: I0129 06:52:13.953989 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.010836 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlgn9\" (UniqueName: \"kubernetes.io/projected/b708431b-9a40-4216-8ff2-e52626a78852-kube-api-access-mlgn9\") pod \"cinder-operator-controller-manager-8d874c8fc-tx9q6\" (UID: \"b708431b-9a40-4216-8ff2-e52626a78852\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.011162 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbbrb\" (UniqueName: \"kubernetes.io/projected/0e2f821a-5976-405b-860f-fc5c14ca3c06-kube-api-access-bbbrb\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-s68hj\" (UID: \"0e2f821a-5976-405b-860f-fc5c14ca3c06\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.035234 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.036485 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.042135 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.043591 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.043603 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-tszld" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.045973 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-4vj5z" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.063373 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.064357 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.065688 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-n6j5p" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.089150 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.102649 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.112517 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbbrb\" (UniqueName: \"kubernetes.io/projected/0e2f821a-5976-405b-860f-fc5c14ca3c06-kube-api-access-bbbrb\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-s68hj\" (UID: \"0e2f821a-5976-405b-860f-fc5c14ca3c06\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.112599 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27nv4\" (UniqueName: \"kubernetes.io/projected/6cd88ee1-a38e-43f0-9470-d7a9745665df-kube-api-access-27nv4\") pod \"glance-operator-controller-manager-8886f4c47-dw8n5\" (UID: \"6cd88ee1-a38e-43f0-9470-d7a9745665df\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.112626 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtxhw\" (UniqueName: \"kubernetes.io/projected/7c81b981-2cba-4cc6-a5c8-aa3d86378e3a-kube-api-access-qtxhw\") pod \"designate-operator-controller-manager-6d9697b7f4-7gs4c\" (UID: \"7c81b981-2cba-4cc6-a5c8-aa3d86378e3a\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.112653 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlgn9\" (UniqueName: \"kubernetes.io/projected/b708431b-9a40-4216-8ff2-e52626a78852-kube-api-access-mlgn9\") pod \"cinder-operator-controller-manager-8d874c8fc-tx9q6\" (UID: \"b708431b-9a40-4216-8ff2-e52626a78852\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.113030 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.114105 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.118865 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-nmnc5" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.122141 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.128664 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.140648 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.141892 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.145806 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.146616 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.146822 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlgn9\" (UniqueName: \"kubernetes.io/projected/b708431b-9a40-4216-8ff2-e52626a78852-kube-api-access-mlgn9\") pod \"cinder-operator-controller-manager-8d874c8fc-tx9q6\" (UID: \"b708431b-9a40-4216-8ff2-e52626a78852\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.151282 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbbrb\" (UniqueName: \"kubernetes.io/projected/0e2f821a-5976-405b-860f-fc5c14ca3c06-kube-api-access-bbbrb\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-s68hj\" (UID: \"0e2f821a-5976-405b-860f-fc5c14ca3c06\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.153745 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.153891 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-c2g25" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.153807 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-959t2" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.159129 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.172303 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.173053 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.176416 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-rh2gg" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.196433 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.200586 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.202159 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.208394 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-jntxg" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.209942 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.218229 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtqnv\" (UniqueName: \"kubernetes.io/projected/e3e0da3c-3983-46a4-b9fe-ef07be8ca90e-kube-api-access-qtqnv\") pod \"heat-operator-controller-manager-69d6db494d-25p9x\" (UID: \"e3e0da3c-3983-46a4-b9fe-ef07be8ca90e\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.218274 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27nv4\" (UniqueName: \"kubernetes.io/projected/6cd88ee1-a38e-43f0-9470-d7a9745665df-kube-api-access-27nv4\") pod \"glance-operator-controller-manager-8886f4c47-dw8n5\" (UID: \"6cd88ee1-a38e-43f0-9470-d7a9745665df\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.218301 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtxhw\" (UniqueName: \"kubernetes.io/projected/7c81b981-2cba-4cc6-a5c8-aa3d86378e3a-kube-api-access-qtxhw\") pod \"designate-operator-controller-manager-6d9697b7f4-7gs4c\" (UID: \"7c81b981-2cba-4cc6-a5c8-aa3d86378e3a\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.218323 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm5nk\" (UniqueName: \"kubernetes.io/projected/c5c7a9b1-011f-435e-8a50-dd0ee333a811-kube-api-access-fm5nk\") pod \"ironic-operator-controller-manager-5f4b8bd54d-txmj4\" (UID: \"c5c7a9b1-011f-435e-8a50-dd0ee333a811\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.218347 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: 
\"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.218369 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rbdf\" (UniqueName: \"kubernetes.io/projected/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-kube-api-access-5rbdf\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: \"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.218423 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhrz9\" (UniqueName: \"kubernetes.io/projected/92f620b6-be39-449e-a1fc-ff64804364b5-kube-api-access-hhrz9\") pod \"horizon-operator-controller-manager-5fb775575f-ps8hr\" (UID: \"92f620b6-be39-449e-a1fc-ff64804364b5\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.238764 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.245669 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.246381 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.248600 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.254987 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.256061 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.267283 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-6xcj2" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.267761 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27nv4\" (UniqueName: \"kubernetes.io/projected/6cd88ee1-a38e-43f0-9470-d7a9745665df-kube-api-access-27nv4\") pod \"glance-operator-controller-manager-8886f4c47-dw8n5\" (UID: \"6cd88ee1-a38e-43f0-9470-d7a9745665df\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.274946 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtxhw\" (UniqueName: \"kubernetes.io/projected/7c81b981-2cba-4cc6-a5c8-aa3d86378e3a-kube-api-access-qtxhw\") pod \"designate-operator-controller-manager-6d9697b7f4-7gs4c\" (UID: \"7c81b981-2cba-4cc6-a5c8-aa3d86378e3a\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.277587 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.278423 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.280021 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-62hq2" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.288344 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.295192 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.297146 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.301703 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-tsbfj" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.309280 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.310106 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.312277 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-dxn92" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.319258 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvc8r\" (UniqueName: \"kubernetes.io/projected/444df6a8-8c8f-4f2f-8092-1dd392f6eed1-kube-api-access-kvc8r\") pod \"manila-operator-controller-manager-7dd968899f-7hqbq\" (UID: \"444df6a8-8c8f-4f2f-8092-1dd392f6eed1\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.319307 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm5nk\" (UniqueName: \"kubernetes.io/projected/c5c7a9b1-011f-435e-8a50-dd0ee333a811-kube-api-access-fm5nk\") pod \"ironic-operator-controller-manager-5f4b8bd54d-txmj4\" (UID: \"c5c7a9b1-011f-435e-8a50-dd0ee333a811\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.319354 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: \"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.319381 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rbdf\" (UniqueName: \"kubernetes.io/projected/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-kube-api-access-5rbdf\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: \"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.319441 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47cbg\" (UniqueName: \"kubernetes.io/projected/0362a774-9d2b-491c-9a6b-96811db0d456-kube-api-access-47cbg\") pod \"keystone-operator-controller-manager-84f48565d4-v4qvf\" (UID: \"0362a774-9d2b-491c-9a6b-96811db0d456\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.319459 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lt8rq\" (UniqueName: \"kubernetes.io/projected/4ffdee81-542f-424a-b4ed-0db4b0ad2409-kube-api-access-lt8rq\") pod \"mariadb-operator-controller-manager-67bf948998-2tjv4\" (UID: \"4ffdee81-542f-424a-b4ed-0db4b0ad2409\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.319489 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhrz9\" (UniqueName: \"kubernetes.io/projected/92f620b6-be39-449e-a1fc-ff64804364b5-kube-api-access-hhrz9\") pod \"horizon-operator-controller-manager-5fb775575f-ps8hr\" (UID: \"92f620b6-be39-449e-a1fc-ff64804364b5\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr" Jan 29 
06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.319518 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtqnv\" (UniqueName: \"kubernetes.io/projected/e3e0da3c-3983-46a4-b9fe-ef07be8ca90e-kube-api-access-qtqnv\") pod \"heat-operator-controller-manager-69d6db494d-25p9x\" (UID: \"e3e0da3c-3983-46a4-b9fe-ef07be8ca90e\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x" Jan 29 06:52:14 crc kubenswrapper[4861]: E0129 06:52:14.319927 4861 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:14 crc kubenswrapper[4861]: E0129 06:52:14.319965 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert podName:b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:14.819950462 +0000 UTC m=+1026.491445019 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert") pod "infra-operator-controller-manager-79955696d6-p2vh6" (UID: "b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5") : secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.323173 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.331827 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.338036 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rbdf\" (UniqueName: \"kubernetes.io/projected/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-kube-api-access-5rbdf\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: \"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.338063 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtqnv\" (UniqueName: \"kubernetes.io/projected/e3e0da3c-3983-46a4-b9fe-ef07be8ca90e-kube-api-access-qtqnv\") pod \"heat-operator-controller-manager-69d6db494d-25p9x\" (UID: \"e3e0da3c-3983-46a4-b9fe-ef07be8ca90e\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.342799 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhrz9\" (UniqueName: \"kubernetes.io/projected/92f620b6-be39-449e-a1fc-ff64804364b5-kube-api-access-hhrz9\") pod \"horizon-operator-controller-manager-5fb775575f-ps8hr\" (UID: \"92f620b6-be39-449e-a1fc-ff64804364b5\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.343433 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm5nk\" (UniqueName: \"kubernetes.io/projected/c5c7a9b1-011f-435e-8a50-dd0ee333a811-kube-api-access-fm5nk\") pod \"ironic-operator-controller-manager-5f4b8bd54d-txmj4\" (UID: \"c5c7a9b1-011f-435e-8a50-dd0ee333a811\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4" Jan 29 06:52:14 crc kubenswrapper[4861]: 
I0129 06:52:14.348153 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.348912 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.350920 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-f5rs7" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.357620 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.358771 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.361235 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.365632 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.374088 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-xxtdt" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.380357 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.381611 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.404391 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.429032 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47cbg\" (UniqueName: \"kubernetes.io/projected/0362a774-9d2b-491c-9a6b-96811db0d456-kube-api-access-47cbg\") pod \"keystone-operator-controller-manager-84f48565d4-v4qvf\" (UID: \"0362a774-9d2b-491c-9a6b-96811db0d456\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.429103 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lt8rq\" (UniqueName: \"kubernetes.io/projected/4ffdee81-542f-424a-b4ed-0db4b0ad2409-kube-api-access-lt8rq\") pod \"mariadb-operator-controller-manager-67bf948998-2tjv4\" (UID: \"4ffdee81-542f-424a-b4ed-0db4b0ad2409\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.429174 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjdmz\" (UniqueName: \"kubernetes.io/projected/45beb56d-51b6-4737-b0f7-cd7db1d86942-kube-api-access-mjdmz\") pod \"placement-operator-controller-manager-5b964cf4cd-2blwh\" (UID: 
\"45beb56d-51b6-4737-b0f7-cd7db1d86942\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.429201 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvc8r\" (UniqueName: \"kubernetes.io/projected/444df6a8-8c8f-4f2f-8092-1dd392f6eed1-kube-api-access-kvc8r\") pod \"manila-operator-controller-manager-7dd968899f-7hqbq\" (UID: \"444df6a8-8c8f-4f2f-8092-1dd392f6eed1\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.429230 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7kzj\" (UniqueName: \"kubernetes.io/projected/4c9a63c6-aa31-43cc-8b09-34a877dc2957-kube-api-access-t7kzj\") pod \"nova-operator-controller-manager-55bff696bd-xjbh9\" (UID: \"4c9a63c6-aa31-43cc-8b09-34a877dc2957\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.429252 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4c8lc\" (UniqueName: \"kubernetes.io/projected/5d6f19fb-11f0-4854-8590-b97ecb2e2ab7-kube-api-access-4c8lc\") pod \"neutron-operator-controller-manager-585dbc889-nszsj\" (UID: \"5d6f19fb-11f0-4854-8590-b97ecb2e2ab7\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.429295 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knr6d\" (UniqueName: \"kubernetes.io/projected/e2b67e50-9400-44b6-b1ba-b6185f758932-kube-api-access-knr6d\") pod \"octavia-operator-controller-manager-6687f8d877-7cw4w\" (UID: \"e2b67e50-9400-44b6-b1ba-b6185f758932\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.441130 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.442183 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.449111 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.453797 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.454100 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.456359 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-btkfn" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.458572 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.460626 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-6h54g" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.462211 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47cbg\" (UniqueName: \"kubernetes.io/projected/0362a774-9d2b-491c-9a6b-96811db0d456-kube-api-access-47cbg\") pod \"keystone-operator-controller-manager-84f48565d4-v4qvf\" (UID: \"0362a774-9d2b-491c-9a6b-96811db0d456\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.467309 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lt8rq\" (UniqueName: \"kubernetes.io/projected/4ffdee81-542f-424a-b4ed-0db4b0ad2409-kube-api-access-lt8rq\") pod \"mariadb-operator-controller-manager-67bf948998-2tjv4\" (UID: \"4ffdee81-542f-424a-b4ed-0db4b0ad2409\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.469813 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvc8r\" (UniqueName: \"kubernetes.io/projected/444df6a8-8c8f-4f2f-8092-1dd392f6eed1-kube-api-access-kvc8r\") pod \"manila-operator-controller-manager-7dd968899f-7hqbq\" (UID: \"444df6a8-8c8f-4f2f-8092-1dd392f6eed1\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.498851 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.511006 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.518145 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.534500 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.536544 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ncb6\" (UniqueName: \"kubernetes.io/projected/eede0800-ba92-4df3-9115-6b97f05620da-kube-api-access-2ncb6\") pod \"ovn-operator-controller-manager-788c46999f-p8vt7\" (UID: \"eede0800-ba92-4df3-9115-6b97f05620da\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.538577 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.539989 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxpvr\" (UniqueName: \"kubernetes.io/projected/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-kube-api-access-kxpvr\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.540062 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjdmz\" (UniqueName: \"kubernetes.io/projected/45beb56d-51b6-4737-b0f7-cd7db1d86942-kube-api-access-mjdmz\") pod \"placement-operator-controller-manager-5b964cf4cd-2blwh\" (UID: \"45beb56d-51b6-4737-b0f7-cd7db1d86942\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.540118 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7kzj\" (UniqueName: \"kubernetes.io/projected/4c9a63c6-aa31-43cc-8b09-34a877dc2957-kube-api-access-t7kzj\") pod \"nova-operator-controller-manager-55bff696bd-xjbh9\" (UID: \"4c9a63c6-aa31-43cc-8b09-34a877dc2957\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.540138 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4c8lc\" (UniqueName: \"kubernetes.io/projected/5d6f19fb-11f0-4854-8590-b97ecb2e2ab7-kube-api-access-4c8lc\") pod \"neutron-operator-controller-manager-585dbc889-nszsj\" (UID: \"5d6f19fb-11f0-4854-8590-b97ecb2e2ab7\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.540180 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knr6d\" (UniqueName: \"kubernetes.io/projected/e2b67e50-9400-44b6-b1ba-b6185f758932-kube-api-access-knr6d\") pod \"octavia-operator-controller-manager-6687f8d877-7cw4w\" (UID: \"e2b67e50-9400-44b6-b1ba-b6185f758932\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.540195 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.551892 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.552865 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.555716 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-lzm9d" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.560678 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjdmz\" (UniqueName: \"kubernetes.io/projected/45beb56d-51b6-4737-b0f7-cd7db1d86942-kube-api-access-mjdmz\") pod \"placement-operator-controller-manager-5b964cf4cd-2blwh\" (UID: \"45beb56d-51b6-4737-b0f7-cd7db1d86942\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.564618 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4c8lc\" (UniqueName: \"kubernetes.io/projected/5d6f19fb-11f0-4854-8590-b97ecb2e2ab7-kube-api-access-4c8lc\") pod \"neutron-operator-controller-manager-585dbc889-nszsj\" (UID: \"5d6f19fb-11f0-4854-8590-b97ecb2e2ab7\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.565266 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knr6d\" (UniqueName: \"kubernetes.io/projected/e2b67e50-9400-44b6-b1ba-b6185f758932-kube-api-access-knr6d\") pod \"octavia-operator-controller-manager-6687f8d877-7cw4w\" (UID: \"e2b67e50-9400-44b6-b1ba-b6185f758932\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.567561 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7kzj\" (UniqueName: \"kubernetes.io/projected/4c9a63c6-aa31-43cc-8b09-34a877dc2957-kube-api-access-t7kzj\") pod \"nova-operator-controller-manager-55bff696bd-xjbh9\" (UID: \"4c9a63c6-aa31-43cc-8b09-34a877dc2957\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.600203 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.639309 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.641542 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnrf2\" (UniqueName: \"kubernetes.io/projected/a8e928e7-8173-4e7d-ae37-26eaeec75ccb-kube-api-access-lnrf2\") pod \"telemetry-operator-controller-manager-64b5b76f97-wwnvz\" (UID: \"a8e928e7-8173-4e7d-ae37-26eaeec75ccb\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.641599 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv6z9\" (UniqueName: \"kubernetes.io/projected/6bbf867e-a436-4e97-aaa5-a77f4ab796ee-kube-api-access-jv6z9\") pod \"swift-operator-controller-manager-68fc8c869-jpq7j\" (UID: \"6bbf867e-a436-4e97-aaa5-a77f4ab796ee\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.641622 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.641655 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ncb6\" (UniqueName: \"kubernetes.io/projected/eede0800-ba92-4df3-9115-6b97f05620da-kube-api-access-2ncb6\") pod \"ovn-operator-controller-manager-788c46999f-p8vt7\" (UID: \"eede0800-ba92-4df3-9115-6b97f05620da\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.641691 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxpvr\" (UniqueName: \"kubernetes.io/projected/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-kube-api-access-kxpvr\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:14 crc kubenswrapper[4861]: E0129 06:52:14.642321 4861 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:14 crc kubenswrapper[4861]: E0129 06:52:14.642378 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert podName:9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:15.142345128 +0000 UTC m=+1026.813839685 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert") pod "openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" (UID: "9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.646786 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.658602 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ncb6\" (UniqueName: \"kubernetes.io/projected/eede0800-ba92-4df3-9115-6b97f05620da-kube-api-access-2ncb6\") pod \"ovn-operator-controller-manager-788c46999f-p8vt7\" (UID: \"eede0800-ba92-4df3-9115-6b97f05620da\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.659712 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.667637 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.669191 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.670533 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.673321 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-h789n" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.674757 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxpvr\" (UniqueName: \"kubernetes.io/projected/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-kube-api-access-kxpvr\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.680986 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.690037 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.699530 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.708979 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-xckfp"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.709808 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.712486 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-txrgk" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.732081 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-xckfp"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.742863 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv6z9\" (UniqueName: \"kubernetes.io/projected/6bbf867e-a436-4e97-aaa5-a77f4ab796ee-kube-api-access-jv6z9\") pod \"swift-operator-controller-manager-68fc8c869-jpq7j\" (UID: \"6bbf867e-a436-4e97-aaa5-a77f4ab796ee\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.743002 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnrf2\" (UniqueName: \"kubernetes.io/projected/a8e928e7-8173-4e7d-ae37-26eaeec75ccb-kube-api-access-lnrf2\") pod \"telemetry-operator-controller-manager-64b5b76f97-wwnvz\" (UID: \"a8e928e7-8173-4e7d-ae37-26eaeec75ccb\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.760093 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnrf2\" (UniqueName: \"kubernetes.io/projected/a8e928e7-8173-4e7d-ae37-26eaeec75ccb-kube-api-access-lnrf2\") pod \"telemetry-operator-controller-manager-64b5b76f97-wwnvz\" (UID: \"a8e928e7-8173-4e7d-ae37-26eaeec75ccb\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.761348 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv6z9\" (UniqueName: \"kubernetes.io/projected/6bbf867e-a436-4e97-aaa5-a77f4ab796ee-kube-api-access-jv6z9\") pod \"swift-operator-controller-manager-68fc8c869-jpq7j\" (UID: \"6bbf867e-a436-4e97-aaa5-a77f4ab796ee\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.770047 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.770962 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.774097 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.775202 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-9dk2f" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.778849 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.783114 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.795974 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.797276 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.799840 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-g28jx" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.810187 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.814202 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.834563 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj"] Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.844825 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: \"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.844923 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kg7xr\" (UniqueName: \"kubernetes.io/projected/79ee9746-9925-45d6-b37b-06ddee279d38-kube-api-access-kg7xr\") pod \"watcher-operator-controller-manager-564965969-xckfp\" (UID: \"79ee9746-9925-45d6-b37b-06ddee279d38\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.844943 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hw56\" (UniqueName: \"kubernetes.io/projected/30562a65-f3e8-4b37-b722-d1c0a8c03996-kube-api-access-8hw56\") pod \"test-operator-controller-manager-56f8bfcd9f-rw2h2\" (UID: \"30562a65-f3e8-4b37-b722-d1c0a8c03996\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" Jan 29 06:52:14 crc kubenswrapper[4861]: E0129 06:52:14.845058 4861 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:14 crc kubenswrapper[4861]: E0129 06:52:14.845115 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert podName:b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:15.845101272 +0000 UTC m=+1027.516595829 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert") pod "infra-operator-controller-manager-79955696d6-p2vh6" (UID: "b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5") : secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.882779 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.945902 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.945951 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82796\" (UniqueName: \"kubernetes.io/projected/e2e876a2-9fd5-4811-bd40-c3e07276fe2b-kube-api-access-82796\") pod \"rabbitmq-cluster-operator-manager-668c99d594-t7725\" (UID: \"e2e876a2-9fd5-4811-bd40-c3e07276fe2b\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.945975 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hw56\" (UniqueName: \"kubernetes.io/projected/30562a65-f3e8-4b37-b722-d1c0a8c03996-kube-api-access-8hw56\") pod \"test-operator-controller-manager-56f8bfcd9f-rw2h2\" (UID: \"30562a65-f3e8-4b37-b722-d1c0a8c03996\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.945994 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kg7xr\" (UniqueName: \"kubernetes.io/projected/79ee9746-9925-45d6-b37b-06ddee279d38-kube-api-access-kg7xr\") pod \"watcher-operator-controller-manager-564965969-xckfp\" (UID: \"79ee9746-9925-45d6-b37b-06ddee279d38\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.946043 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d4kw\" (UniqueName: \"kubernetes.io/projected/8f8a9499-a68a-4797-9801-a070bff21b9f-kube-api-access-4d4kw\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.946102 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.962713 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kg7xr\" (UniqueName: \"kubernetes.io/projected/79ee9746-9925-45d6-b37b-06ddee279d38-kube-api-access-kg7xr\") pod \"watcher-operator-controller-manager-564965969-xckfp\" (UID: \"79ee9746-9925-45d6-b37b-06ddee279d38\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" Jan 29 06:52:14 crc kubenswrapper[4861]: I0129 06:52:14.968185 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hw56\" (UniqueName: 
\"kubernetes.io/projected/30562a65-f3e8-4b37-b722-d1c0a8c03996-kube-api-access-8hw56\") pod \"test-operator-controller-manager-56f8bfcd9f-rw2h2\" (UID: \"30562a65-f3e8-4b37-b722-d1c0a8c03996\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:14.992903 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.030420 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.036444 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.049093 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.049183 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.049210 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82796\" (UniqueName: \"kubernetes.io/projected/e2e876a2-9fd5-4811-bd40-c3e07276fe2b-kube-api-access-82796\") pod \"rabbitmq-cluster-operator-manager-668c99d594-t7725\" (UID: \"e2e876a2-9fd5-4811-bd40-c3e07276fe2b\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.049255 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d4kw\" (UniqueName: \"kubernetes.io/projected/8f8a9499-a68a-4797-9801-a070bff21b9f-kube-api-access-4d4kw\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.049829 4861 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.049876 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. No retries permitted until 2026-01-29 06:52:15.549863017 +0000 UTC m=+1027.221357575 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "metrics-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.049915 4861 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.049985 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. No retries permitted until 2026-01-29 06:52:15.54996614 +0000 UTC m=+1027.221460697 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "webhook-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.065710 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82796\" (UniqueName: \"kubernetes.io/projected/e2e876a2-9fd5-4811-bd40-c3e07276fe2b-kube-api-access-82796\") pod \"rabbitmq-cluster-operator-manager-668c99d594-t7725\" (UID: \"e2e876a2-9fd5-4811-bd40-c3e07276fe2b\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.066568 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d4kw\" (UniqueName: \"kubernetes.io/projected/8f8a9499-a68a-4797-9801-a070bff21b9f-kube-api-access-4d4kw\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.164173 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.167665 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.167923 4861 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.168027 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert podName:9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:16.168010211 +0000 UTC m=+1027.839504768 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert") pod "openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" (UID: "9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.176635 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.195110 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.350444 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr"] Jan 29 06:52:15 crc kubenswrapper[4861]: W0129 06:52:15.362006 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92f620b6_be39_449e_a1fc_ff64804364b5.slice/crio-5c67f4cfe20ea52f183fa4b9417057960ce39175ee4503bb9e571cce6889eec3 WatchSource:0}: Error finding container 5c67f4cfe20ea52f183fa4b9417057960ce39175ee4503bb9e571cce6889eec3: Status 404 returned error can't find the container with id 5c67f4cfe20ea52f183fa4b9417057960ce39175ee4503bb9e571cce6889eec3 Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.386551 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.390299 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.597728 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.607943 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.608168 4861 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.608209 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.608239 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. No retries permitted until 2026-01-29 06:52:16.608222051 +0000 UTC m=+1028.279716608 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "metrics-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.608347 4861 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.608396 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. No retries permitted until 2026-01-29 06:52:16.608380435 +0000 UTC m=+1028.279874992 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "webhook-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.619997 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.637466 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.642178 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq"] Jan 29 06:52:15 crc kubenswrapper[4861]: W0129 06:52:15.648356 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod45beb56d_51b6_4737_b0f7_cd7db1d86942.slice/crio-ea4a0a82c9199de44db2d1557d9821dd2b119dcbc332fb7d9f54e4bab3318eba WatchSource:0}: Error finding container ea4a0a82c9199de44db2d1557d9821dd2b119dcbc332fb7d9f54e4bab3318eba: Status 404 returned error can't find the container with id ea4a0a82c9199de44db2d1557d9821dd2b119dcbc332fb7d9f54e4bab3318eba Jan 29 06:52:15 crc kubenswrapper[4861]: W0129 06:52:15.657192 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod444df6a8_8c8f_4f2f_8092_1dd392f6eed1.slice/crio-2513008bdaf7d4ee8ba62e2adede761996438f47bdb60ed3c5f32cf9ef5c8c52 WatchSource:0}: Error finding container 2513008bdaf7d4ee8ba62e2adede761996438f47bdb60ed3c5f32cf9ef5c8c52: Status 404 returned error can't find the container with id 2513008bdaf7d4ee8ba62e2adede761996438f47bdb60ed3c5f32cf9ef5c8c52 Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.716625 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj"] Jan 29 06:52:15 crc kubenswrapper[4861]: W0129 06:52:15.721806 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6bbf867e_a436_4e97_aaa5_a77f4ab796ee.slice/crio-b226cfab5b46c533460c7849bf81471e195e4e8f9b2bbf3c32da197d6d079550 WatchSource:0}: Error finding container b226cfab5b46c533460c7849bf81471e195e4e8f9b2bbf3c32da197d6d079550: Status 404 returned error can't find the container with id 
b226cfab5b46c533460c7849bf81471e195e4e8f9b2bbf3c32da197d6d079550 Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.726232 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.734164 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.739203 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.743728 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9"] Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.745173 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:f9bf288cd0c13912404027a58ea3b90d4092b641e8265adc5c88644ea7fe901a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lnrf2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-64b5b76f97-wwnvz_openstack-operators(a8e928e7-8173-4e7d-ae37-26eaeec75ccb): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.747502 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" podUID="a8e928e7-8173-4e7d-ae37-26eaeec75ccb" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.747655 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7"] Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.755148 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2ncb6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-788c46999f-p8vt7_openstack-operators(eede0800-ba92-4df3-9115-6b97f05620da): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.755315 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-knr6d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-6687f8d877-7cw4w_openstack-operators(e2b67e50-9400-44b6-b1ba-b6185f758932): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.756764 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" podUID="e2b67e50-9400-44b6-b1ba-b6185f758932" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.756782 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" podUID="eede0800-ba92-4df3-9115-6b97f05620da" Jan 29 06:52:15 crc kubenswrapper[4861]: W0129 06:52:15.767360 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d6f19fb_11f0_4854_8590_b97ecb2e2ab7.slice/crio-8b36aded104cf1ae7b711170ca0e88d9e45156436e2ab56ccd8252748a6e714e WatchSource:0}: Error finding container 8b36aded104cf1ae7b711170ca0e88d9e45156436e2ab56ccd8252748a6e714e: Status 404 returned error can't find the container with id 8b36aded104cf1ae7b711170ca0e88d9e45156436e2ab56ccd8252748a6e714e Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.880578 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/watcher-operator-controller-manager-564965969-xckfp"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.884881 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2"] Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.890741 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4" event={"ID":"c5c7a9b1-011f-435e-8a50-dd0ee333a811","Type":"ContainerStarted","Data":"f020b9dcae2c6512b8179a9486ef0212b64490ef21b06732a05c247dffeca39e"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.891793 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" event={"ID":"444df6a8-8c8f-4f2f-8092-1dd392f6eed1","Type":"ContainerStarted","Data":"2513008bdaf7d4ee8ba62e2adede761996438f47bdb60ed3c5f32cf9ef5c8c52"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.893229 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6" event={"ID":"b708431b-9a40-4216-8ff2-e52626a78852","Type":"ContainerStarted","Data":"222aa9313a5cd3fc305a8b6dae4736647f517e811f1216db9d976d472b4b420e"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.899035 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4" event={"ID":"4ffdee81-542f-424a-b4ed-0db4b0ad2409","Type":"ContainerStarted","Data":"2e13b64d80e698c0d40b631bf91dcf8705886efe253cf4af83c6109d7e442dbf"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.899977 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j" event={"ID":"6bbf867e-a436-4e97-aaa5-a77f4ab796ee","Type":"ContainerStarted","Data":"b226cfab5b46c533460c7849bf81471e195e4e8f9b2bbf3c32da197d6d079550"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.901003 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf" event={"ID":"0362a774-9d2b-491c-9a6b-96811db0d456","Type":"ContainerStarted","Data":"aa022a9f1de7d156c31daa3c0cda3a8b73c0a4ebde201336c73c62e3fd7cc470"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.902232 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725"] Jan 29 06:52:15 crc kubenswrapper[4861]: W0129 06:52:15.908155 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79ee9746_9925_45d6_b37b_06ddee279d38.slice/crio-20aa04a0bc93533a85ad04f90be6638afabeb79d036aec599ad8cb9d34d11a88 WatchSource:0}: Error finding container 20aa04a0bc93533a85ad04f90be6638afabeb79d036aec599ad8cb9d34d11a88: Status 404 returned error can't find the container with id 20aa04a0bc93533a85ad04f90be6638afabeb79d036aec599ad8cb9d34d11a88 Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.910340 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kg7xr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-564965969-xckfp_openstack-operators(79ee9746-9925-45d6-b37b-06ddee279d38): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 06:52:15 crc kubenswrapper[4861]: W0129 06:52:15.911389 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode2e876a2_9fd5_4811_bd40_c3e07276fe2b.slice/crio-7f450008b41cb7ebf7317cc0654991ff7941d9fef58a3344cb413b7f8c47da12 WatchSource:0}: Error finding container 7f450008b41cb7ebf7317cc0654991ff7941d9fef58a3344cb413b7f8c47da12: Status 404 returned error can't find the container with id 7f450008b41cb7ebf7317cc0654991ff7941d9fef58a3344cb413b7f8c47da12 Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.911778 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" podUID="79ee9746-9925-45d6-b37b-06ddee279d38" Jan 29 06:52:15 crc kubenswrapper[4861]: W0129 06:52:15.913508 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30562a65_f3e8_4b37_b722_d1c0a8c03996.slice/crio-ee8f848bc0e83cec80225a73026b1b0bbb74c3f017ca4e313953a165e25d9103 WatchSource:0}: Error finding container ee8f848bc0e83cec80225a73026b1b0bbb74c3f017ca4e313953a165e25d9103: Status 404 returned error can't find the container with id 
ee8f848bc0e83cec80225a73026b1b0bbb74c3f017ca4e313953a165e25d9103 Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.913798 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: \"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.914050 4861 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.914319 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert podName:b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:17.914302172 +0000 UTC m=+1029.585796729 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert") pod "infra-operator-controller-manager-79955696d6-p2vh6" (UID: "b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5") : secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.915965 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5" event={"ID":"6cd88ee1-a38e-43f0-9470-d7a9745665df","Type":"ContainerStarted","Data":"07f88bfd1c24e9bbf412523545d1cd926327ade415b74be5ccba43354253ab57"} Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.921811 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8hw56,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-56f8bfcd9f-rw2h2_openstack-operators(30562a65-f3e8-4b37-b722-d1c0a8c03996): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.922427 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-82796,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-t7725_openstack-operators(e2e876a2-9fd5-4811-bd40-c3e07276fe2b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.923044 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" podUID="30562a65-f3e8-4b37-b722-d1c0a8c03996" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.923490 4861 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725" podUID="e2e876a2-9fd5-4811-bd40-c3e07276fe2b" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.923834 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" event={"ID":"eede0800-ba92-4df3-9115-6b97f05620da","Type":"ContainerStarted","Data":"4de2d135cb79c324914f2055f2d08a47da6da1070677b1c73ca0e2794a1f9078"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.925724 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" event={"ID":"e2b67e50-9400-44b6-b1ba-b6185f758932","Type":"ContainerStarted","Data":"b0380836a155c6c5d2bdf22560aafcfa75ce3e1e02a75999c33645e8b9ec78fe"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.927348 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr" event={"ID":"92f620b6-be39-449e-a1fc-ff64804364b5","Type":"ContainerStarted","Data":"5c67f4cfe20ea52f183fa4b9417057960ce39175ee4503bb9e571cce6889eec3"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.931807 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj" event={"ID":"0e2f821a-5976-405b-860f-fc5c14ca3c06","Type":"ContainerStarted","Data":"994922af1a279694beecccef9c2d6a4d5bbd28b29f9f4a1b894ba7d8e34c4b29"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.933232 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c" event={"ID":"7c81b981-2cba-4cc6-a5c8-aa3d86378e3a","Type":"ContainerStarted","Data":"1e67d96d7feb48c224e6a8228231ac058c290b5d0d20970c11c19773038f8f54"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.935194 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" event={"ID":"a8e928e7-8173-4e7d-ae37-26eaeec75ccb","Type":"ContainerStarted","Data":"386bf9fdb6a638e5ed4a36f61792b3cc0fc7471cdd37d7a6957a3aca30e36040"} Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.936570 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:f9bf288cd0c13912404027a58ea3b90d4092b641e8265adc5c88644ea7fe901a\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" podUID="a8e928e7-8173-4e7d-ae37-26eaeec75ccb" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.936667 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" podUID="eede0800-ba92-4df3-9115-6b97f05620da" Jan 29 06:52:15 crc kubenswrapper[4861]: E0129 06:52:15.939720 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" podUID="e2b67e50-9400-44b6-b1ba-b6185f758932" Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.945198 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x" event={"ID":"e3e0da3c-3983-46a4-b9fe-ef07be8ca90e","Type":"ContainerStarted","Data":"9e98bdf03561159aebe6f9b8d35da05a2e54bf1f50522103bec4b156b4c52684"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.947668 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9" event={"ID":"4c9a63c6-aa31-43cc-8b09-34a877dc2957","Type":"ContainerStarted","Data":"e75e2b88dae954c08c556ceddb0ff36183e383d9c972abe56f2c594c0b0d36aa"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.948883 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj" event={"ID":"5d6f19fb-11f0-4854-8590-b97ecb2e2ab7","Type":"ContainerStarted","Data":"8b36aded104cf1ae7b711170ca0e88d9e45156436e2ab56ccd8252748a6e714e"} Jan 29 06:52:15 crc kubenswrapper[4861]: I0129 06:52:15.949927 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh" event={"ID":"45beb56d-51b6-4737-b0f7-cd7db1d86942","Type":"ContainerStarted","Data":"ea4a0a82c9199de44db2d1557d9821dd2b119dcbc332fb7d9f54e4bab3318eba"} Jan 29 06:52:16 crc kubenswrapper[4861]: I0129 06:52:16.218026 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.218663 4861 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.218882 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert podName:9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:18.218777391 +0000 UTC m=+1029.890272008 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert") pod "openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" (UID: "9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:16 crc kubenswrapper[4861]: I0129 06:52:16.621846 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:16 crc kubenswrapper[4861]: I0129 06:52:16.621920 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.622035 4861 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.622092 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. No retries permitted until 2026-01-29 06:52:18.622065211 +0000 UTC m=+1030.293559768 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "metrics-server-cert" not found Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.622372 4861 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.622408 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. No retries permitted until 2026-01-29 06:52:18.622388009 +0000 UTC m=+1030.293882566 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "webhook-server-cert" not found Jan 29 06:52:16 crc kubenswrapper[4861]: I0129 06:52:16.959025 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" event={"ID":"30562a65-f3e8-4b37-b722-d1c0a8c03996","Type":"ContainerStarted","Data":"ee8f848bc0e83cec80225a73026b1b0bbb74c3f017ca4e313953a165e25d9103"} Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.960617 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241\\\"\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" podUID="30562a65-f3e8-4b37-b722-d1c0a8c03996" Jan 29 06:52:16 crc kubenswrapper[4861]: I0129 06:52:16.964824 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725" event={"ID":"e2e876a2-9fd5-4811-bd40-c3e07276fe2b","Type":"ContainerStarted","Data":"7f450008b41cb7ebf7317cc0654991ff7941d9fef58a3344cb413b7f8c47da12"} Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.967024 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725" podUID="e2e876a2-9fd5-4811-bd40-c3e07276fe2b" Jan 29 06:52:16 crc kubenswrapper[4861]: I0129 06:52:16.969471 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" event={"ID":"79ee9746-9925-45d6-b37b-06ddee279d38","Type":"ContainerStarted","Data":"20aa04a0bc93533a85ad04f90be6638afabeb79d036aec599ad8cb9d34d11a88"} Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.970835 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" podUID="eede0800-ba92-4df3-9115-6b97f05620da" Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.970905 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" podUID="79ee9746-9925-45d6-b37b-06ddee279d38" Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.970948 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:f9bf288cd0c13912404027a58ea3b90d4092b641e8265adc5c88644ea7fe901a\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" podUID="a8e928e7-8173-4e7d-ae37-26eaeec75ccb" Jan 29 06:52:16 crc kubenswrapper[4861]: E0129 06:52:16.971369 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" podUID="e2b67e50-9400-44b6-b1ba-b6185f758932" Jan 29 06:52:17 crc kubenswrapper[4861]: I0129 06:52:17.939536 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: \"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:17 crc kubenswrapper[4861]: E0129 06:52:17.939776 4861 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:17 crc kubenswrapper[4861]: E0129 06:52:17.939856 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert podName:b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:21.939833136 +0000 UTC m=+1033.611327693 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert") pod "infra-operator-controller-manager-79955696d6-p2vh6" (UID: "b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5") : secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:17 crc kubenswrapper[4861]: E0129 06:52:17.978208 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241\\\"\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" podUID="30562a65-f3e8-4b37-b722-d1c0a8c03996" Jan 29 06:52:17 crc kubenswrapper[4861]: E0129 06:52:17.978330 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" podUID="79ee9746-9925-45d6-b37b-06ddee279d38" Jan 29 06:52:17 crc kubenswrapper[4861]: E0129 06:52:17.978741 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725" podUID="e2e876a2-9fd5-4811-bd40-c3e07276fe2b" Jan 29 06:52:18 crc kubenswrapper[4861]: I0129 06:52:18.243208 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:18 crc kubenswrapper[4861]: E0129 06:52:18.243388 4861 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:18 crc kubenswrapper[4861]: E0129 06:52:18.243521 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert podName:9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:22.243505974 +0000 UTC m=+1033.915000531 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert") pod "openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" (UID: "9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:18 crc kubenswrapper[4861]: I0129 06:52:18.649874 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:18 crc kubenswrapper[4861]: I0129 06:52:18.650108 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:18 crc kubenswrapper[4861]: E0129 06:52:18.650118 4861 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 06:52:18 crc kubenswrapper[4861]: E0129 06:52:18.650214 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. No retries permitted until 2026-01-29 06:52:22.650190192 +0000 UTC m=+1034.321684759 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "metrics-server-cert" not found Jan 29 06:52:18 crc kubenswrapper[4861]: E0129 06:52:18.650290 4861 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 06:52:18 crc kubenswrapper[4861]: E0129 06:52:18.650379 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. 
No retries permitted until 2026-01-29 06:52:22.650351277 +0000 UTC m=+1034.321845874 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "webhook-server-cert" not found Jan 29 06:52:21 crc kubenswrapper[4861]: I0129 06:52:21.996137 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: \"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:21 crc kubenswrapper[4861]: E0129 06:52:21.996373 4861 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:21 crc kubenswrapper[4861]: E0129 06:52:21.996719 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert podName:b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:29.996700355 +0000 UTC m=+1041.668194922 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert") pod "infra-operator-controller-manager-79955696d6-p2vh6" (UID: "b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5") : secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:22 crc kubenswrapper[4861]: I0129 06:52:22.303984 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:22 crc kubenswrapper[4861]: E0129 06:52:22.304189 4861 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:22 crc kubenswrapper[4861]: E0129 06:52:22.304431 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert podName:9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:30.304409639 +0000 UTC m=+1041.975904266 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert") pod "openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" (UID: "9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:22 crc kubenswrapper[4861]: I0129 06:52:22.709889 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:22 crc kubenswrapper[4861]: I0129 06:52:22.710012 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:22 crc kubenswrapper[4861]: E0129 06:52:22.710201 4861 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 06:52:22 crc kubenswrapper[4861]: E0129 06:52:22.710253 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. No retries permitted until 2026-01-29 06:52:30.710237785 +0000 UTC m=+1042.381732352 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "webhook-server-cert" not found Jan 29 06:52:22 crc kubenswrapper[4861]: E0129 06:52:22.710649 4861 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 06:52:22 crc kubenswrapper[4861]: E0129 06:52:22.710685 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. No retries permitted until 2026-01-29 06:52:30.710675126 +0000 UTC m=+1042.382169693 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "metrics-server-cert" not found Jan 29 06:52:27 crc kubenswrapper[4861]: E0129 06:52:27.926619 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:cd911e8d7a7a1104d77691dbaaf54370015cbb82859337746db5a9186d5dc566" Jan 29 06:52:27 crc kubenswrapper[4861]: E0129 06:52:27.926958 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:cd911e8d7a7a1104d77691dbaaf54370015cbb82859337746db5a9186d5dc566,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kvc8r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-7dd968899f-7hqbq_openstack-operators(444df6a8-8c8f-4f2f-8092-1dd392f6eed1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 06:52:27 crc kubenswrapper[4861]: E0129 06:52:27.928262 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" 
podUID="444df6a8-8c8f-4f2f-8092-1dd392f6eed1" Jan 29 06:52:28 crc kubenswrapper[4861]: E0129 06:52:28.094665 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:cd911e8d7a7a1104d77691dbaaf54370015cbb82859337746db5a9186d5dc566\\\"\"" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" podUID="444df6a8-8c8f-4f2f-8092-1dd392f6eed1" Jan 29 06:52:28 crc kubenswrapper[4861]: E0129 06:52:28.684133 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:2d493137559b74e23edb4788b7fbdb38b3e239df0f2d7e6e540e50b2355fc3cf" Jan 29 06:52:28 crc kubenswrapper[4861]: E0129 06:52:28.684311 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:2d493137559b74e23edb4788b7fbdb38b3e239df0f2d7e6e540e50b2355fc3cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lt8rq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-67bf948998-2tjv4_openstack-operators(4ffdee81-542f-424a-b4ed-0db4b0ad2409): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 06:52:28 crc kubenswrapper[4861]: E0129 06:52:28.686176 4861 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4" podUID="4ffdee81-542f-424a-b4ed-0db4b0ad2409" Jan 29 06:52:29 crc kubenswrapper[4861]: E0129 06:52:29.060617 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:2d493137559b74e23edb4788b7fbdb38b3e239df0f2d7e6e540e50b2355fc3cf\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4" podUID="4ffdee81-542f-424a-b4ed-0db4b0ad2409" Jan 29 06:52:29 crc kubenswrapper[4861]: E0129 06:52:29.463670 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382" Jan 29 06:52:29 crc kubenswrapper[4861]: E0129 06:52:29.464148 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jv6z9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-68fc8c869-jpq7j_openstack-operators(6bbf867e-a436-4e97-aaa5-a77f4ab796ee): 
ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 06:52:29 crc kubenswrapper[4861]: E0129 06:52:29.465277 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j" podUID="6bbf867e-a436-4e97-aaa5-a77f4ab796ee" Jan 29 06:52:29 crc kubenswrapper[4861]: E0129 06:52:29.964263 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6" Jan 29 06:52:29 crc kubenswrapper[4861]: E0129 06:52:29.964464 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4c8lc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-585dbc889-nszsj_openstack-operators(5d6f19fb-11f0-4854-8590-b97ecb2e2ab7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 06:52:29 crc kubenswrapper[4861]: E0129 06:52:29.965654 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj" podUID="5d6f19fb-11f0-4854-8590-b97ecb2e2ab7" Jan 29 06:52:30 crc kubenswrapper[4861]: I0129 06:52:30.014216 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: \"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.014649 4861 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.014719 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert podName:b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:46.014697785 +0000 UTC m=+1057.686192382 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert") pod "infra-operator-controller-manager-79955696d6-p2vh6" (UID: "b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5") : secret "infra-operator-webhook-server-cert" not found Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.067546 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj" podUID="5d6f19fb-11f0-4854-8590-b97ecb2e2ab7" Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.067663 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382\\\"\"" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j" podUID="6bbf867e-a436-4e97-aaa5-a77f4ab796ee" Jan 29 06:52:30 crc kubenswrapper[4861]: I0129 06:52:30.318279 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.318455 4861 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.318506 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert podName:9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48 nodeName:}" failed. No retries permitted until 2026-01-29 06:52:46.318490187 +0000 UTC m=+1057.989984744 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert") pod "openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" (UID: "9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.562745 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488" Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.563006 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mjdmz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5b964cf4cd-2blwh_openstack-operators(45beb56d-51b6-4737-b0f7-cd7db1d86942): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.564304 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh" podUID="45beb56d-51b6-4737-b0f7-cd7db1d86942" Jan 29 06:52:30 crc kubenswrapper[4861]: I0129 06:52:30.723771 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:30 crc kubenswrapper[4861]: I0129 06:52:30.724257 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.723943 4861 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.724377 4861 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.724509 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. No retries permitted until 2026-01-29 06:52:46.724489577 +0000 UTC m=+1058.395984134 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "webhook-server-cert" not found Jan 29 06:52:30 crc kubenswrapper[4861]: E0129 06:52:30.724571 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs podName:8f8a9499-a68a-4797-9801-a070bff21b9f nodeName:}" failed. No retries permitted until 2026-01-29 06:52:46.724531348 +0000 UTC m=+1058.396025905 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs") pod "openstack-operator-controller-manager-7b54f464f6-95nrq" (UID: "8f8a9499-a68a-4797-9801-a070bff21b9f") : secret "metrics-server-cert" not found Jan 29 06:52:31 crc kubenswrapper[4861]: E0129 06:52:31.073537 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh" podUID="45beb56d-51b6-4737-b0f7-cd7db1d86942" Jan 29 06:52:31 crc kubenswrapper[4861]: E0129 06:52:31.119086 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:5340b88039fac393da49ef4e181b2720c809c27a6bb30531a07a49342a1da45e" Jan 29 06:52:31 crc kubenswrapper[4861]: E0129 06:52:31.119248 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:5340b88039fac393da49ef4e181b2720c809c27a6bb30531a07a49342a1da45e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-t7kzj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
nova-operator-controller-manager-55bff696bd-xjbh9_openstack-operators(4c9a63c6-aa31-43cc-8b09-34a877dc2957): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 06:52:31 crc kubenswrapper[4861]: E0129 06:52:31.120362 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9" podUID="4c9a63c6-aa31-43cc-8b09-34a877dc2957" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.087379 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr" event={"ID":"92f620b6-be39-449e-a1fc-ff64804364b5","Type":"ContainerStarted","Data":"9c162a33d3319dce307156817d311f0d4ec200e6ad68365620b341b857265729"} Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.088616 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.098716 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj" event={"ID":"0e2f821a-5976-405b-860f-fc5c14ca3c06","Type":"ContainerStarted","Data":"efd1867c830058792baecc2036e5b43bdc19a22172b71ac50af15d29673a4689"} Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.099318 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.108740 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x" event={"ID":"e3e0da3c-3983-46a4-b9fe-ef07be8ca90e","Type":"ContainerStarted","Data":"4a55bad9069700ac550aece6bb287d8caff9dad1c95019695f5eb482d6257d4a"} Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.109687 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.122607 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf" event={"ID":"0362a774-9d2b-491c-9a6b-96811db0d456","Type":"ContainerStarted","Data":"ddddaab13afd32b703879ca65d4be1da877285b4b5d313ac2d8e77fc09798efa"} Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.123229 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.134488 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5" event={"ID":"6cd88ee1-a38e-43f0-9470-d7a9745665df","Type":"ContainerStarted","Data":"451a9147c8249e1d4a17135bc740c8c13dcdda64753aebc972d64cd353b3e58c"} Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.135165 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.144057 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
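Every timestamp in these records carries an m=+… suffix alongside the UTC wall-clock value; that is Go's monotonic clock reading, here counting seconds since the kubelet process started (m=+1043 is roughly 17 minutes of uptime). A small demonstration of where the suffix comes from:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now() // carries a monotonic reading
	time.Sleep(50 * time.Millisecond)
	now := time.Now()

	// String() appends the monotonic reading as "m=+...", exactly the
	// suffix seen on the kubelet timestamps above.
	fmt.Println(now)

	// Durations between two monotonic readings are immune to wall-clock jumps.
	fmt.Println(now.Sub(start))
}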
pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4" event={"ID":"c5c7a9b1-011f-435e-8a50-dd0ee333a811","Type":"ContainerStarted","Data":"bac4164bbe24089446f2ba1308c7aff8f7dc34523c64f89353c8a1da0e3b612b"} Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.144562 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.150701 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr" podStartSLOduration=3.160197977 podStartE2EDuration="19.150678532s" podCreationTimestamp="2026-01-29 06:52:13 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.363789973 +0000 UTC m=+1027.035284530" lastFinishedPulling="2026-01-29 06:52:31.354270498 +0000 UTC m=+1043.025765085" observedRunningTime="2026-01-29 06:52:32.144211874 +0000 UTC m=+1043.815706441" watchObservedRunningTime="2026-01-29 06:52:32.150678532 +0000 UTC m=+1043.822173089" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.157893 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c" event={"ID":"7c81b981-2cba-4cc6-a5c8-aa3d86378e3a","Type":"ContainerStarted","Data":"07a6874ad5b2e34a06bb687a187ee752ca49b3e73e015863e8898970ef9a1d4c"} Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.158227 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.163593 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6" event={"ID":"b708431b-9a40-4216-8ff2-e52626a78852","Type":"ContainerStarted","Data":"c42d50ed6fdccb63b8efb33039c0a154a524ab124586b669d8ce8294c42a6893"} Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.163630 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6" Jan 29 06:52:32 crc kubenswrapper[4861]: E0129 06:52:32.167221 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:5340b88039fac393da49ef4e181b2720c809c27a6bb30531a07a49342a1da45e\\\"\"" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9" podUID="4c9a63c6-aa31-43cc-8b09-34a877dc2957" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.185548 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x" podStartSLOduration=3.197480306 podStartE2EDuration="19.185526768s" podCreationTimestamp="2026-01-29 06:52:13 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.396212286 +0000 UTC m=+1027.067706843" lastFinishedPulling="2026-01-29 06:52:31.384258718 +0000 UTC m=+1043.055753305" observedRunningTime="2026-01-29 06:52:32.180141138 +0000 UTC m=+1043.851635695" watchObservedRunningTime="2026-01-29 06:52:32.185526768 +0000 UTC m=+1043.857021325" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.215575 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf" podStartSLOduration=2.444379428 podStartE2EDuration="18.215559239s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.661378953 +0000 UTC m=+1027.332873510" lastFinishedPulling="2026-01-29 06:52:31.432558764 +0000 UTC m=+1043.104053321" observedRunningTime="2026-01-29 06:52:32.212996873 +0000 UTC m=+1043.884491440" watchObservedRunningTime="2026-01-29 06:52:32.215559239 +0000 UTC m=+1043.887053796" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.257103 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj" podStartSLOduration=2.7783693449999998 podStartE2EDuration="19.257086599s" podCreationTimestamp="2026-01-29 06:52:13 +0000 UTC" firstStartedPulling="2026-01-29 06:52:14.899428455 +0000 UTC m=+1026.570923012" lastFinishedPulling="2026-01-29 06:52:31.378145699 +0000 UTC m=+1043.049640266" observedRunningTime="2026-01-29 06:52:32.23325403 +0000 UTC m=+1043.904748597" watchObservedRunningTime="2026-01-29 06:52:32.257086599 +0000 UTC m=+1043.928581156" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.257942 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5" podStartSLOduration=3.237198131 podStartE2EDuration="19.257936712s" podCreationTimestamp="2026-01-29 06:52:13 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.362197902 +0000 UTC m=+1027.033692459" lastFinishedPulling="2026-01-29 06:52:31.382936453 +0000 UTC m=+1043.054431040" observedRunningTime="2026-01-29 06:52:32.253431764 +0000 UTC m=+1043.924926321" watchObservedRunningTime="2026-01-29 06:52:32.257936712 +0000 UTC m=+1043.929431259" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.281214 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4" podStartSLOduration=2.289308185 podStartE2EDuration="18.281199387s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.393015703 +0000 UTC m=+1027.064510260" lastFinishedPulling="2026-01-29 06:52:31.384906905 +0000 UTC m=+1043.056401462" observedRunningTime="2026-01-29 06:52:32.280332354 +0000 UTC m=+1043.951826911" watchObservedRunningTime="2026-01-29 06:52:32.281199387 +0000 UTC m=+1043.952693944" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.338981 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c" podStartSLOduration=3.331913483 podStartE2EDuration="19.338957089s" podCreationTimestamp="2026-01-29 06:52:13 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.377705005 +0000 UTC m=+1027.049199562" lastFinishedPulling="2026-01-29 06:52:31.384748601 +0000 UTC m=+1043.056243168" observedRunningTime="2026-01-29 06:52:32.308709852 +0000 UTC m=+1043.980204439" watchObservedRunningTime="2026-01-29 06:52:32.338957089 +0000 UTC m=+1044.010451636" Jan 29 06:52:32 crc kubenswrapper[4861]: I0129 06:52:32.370264 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6" podStartSLOduration=3.043012329 podStartE2EDuration="19.370245643s" podCreationTimestamp="2026-01-29 06:52:13 +0000 UTC" firstStartedPulling="2026-01-29 
06:52:15.065556726 +0000 UTC m=+1026.737051283" lastFinishedPulling="2026-01-29 06:52:31.39279003 +0000 UTC m=+1043.064284597" observedRunningTime="2026-01-29 06:52:32.369425982 +0000 UTC m=+1044.040920549" watchObservedRunningTime="2026-01-29 06:52:32.370245643 +0000 UTC m=+1044.041740200" Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.214422 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" event={"ID":"a8e928e7-8173-4e7d-ae37-26eaeec75ccb","Type":"ContainerStarted","Data":"8454db9d10bdb17e6c298222fdd6e8abff7eb8ffa77aed6918825bf2cbc56d94"} Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.215783 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.217232 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" event={"ID":"30562a65-f3e8-4b37-b722-d1c0a8c03996","Type":"ContainerStarted","Data":"f8b3626355fb5fe4eb5c250f2548930f9f18e812e679778e9547b2f9dea692b6"} Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.217635 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.219244 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" event={"ID":"eede0800-ba92-4df3-9115-6b97f05620da","Type":"ContainerStarted","Data":"a22e7db470df4938e6f448e6c7e7578742f824e2694b725c554c4d0c57922d2c"} Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.219582 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.220904 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" event={"ID":"e2b67e50-9400-44b6-b1ba-b6185f758932","Type":"ContainerStarted","Data":"f0a9391a64db1d55f46a61151214725666e858a2eb46ec97fd24a0baeb07f881"} Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.221247 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.222149 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725" event={"ID":"e2e876a2-9fd5-4811-bd40-c3e07276fe2b","Type":"ContainerStarted","Data":"472c1d238819f0a75a8c6be8f72bb700216d7cd4a94ca52aec49914e4220f803"} Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.223212 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" event={"ID":"79ee9746-9925-45d6-b37b-06ddee279d38","Type":"ContainerStarted","Data":"5a9957a3915f4b6349db874f1056cd26e348a66eefb3152742c7836706f0cb71"} Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.223582 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.235112 4861 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" podStartSLOduration=2.856670263 podStartE2EDuration="24.235094769s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.745032259 +0000 UTC m=+1027.416526816" lastFinishedPulling="2026-01-29 06:52:37.123456725 +0000 UTC m=+1048.794951322" observedRunningTime="2026-01-29 06:52:38.232808699 +0000 UTC m=+1049.904303306" watchObservedRunningTime="2026-01-29 06:52:38.235094769 +0000 UTC m=+1049.906589356" Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.248806 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" podStartSLOduration=2.879966159 podStartE2EDuration="24.248785865s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.755023399 +0000 UTC m=+1027.426517956" lastFinishedPulling="2026-01-29 06:52:37.123843095 +0000 UTC m=+1048.795337662" observedRunningTime="2026-01-29 06:52:38.246490885 +0000 UTC m=+1049.917985452" watchObservedRunningTime="2026-01-29 06:52:38.248785865 +0000 UTC m=+1049.920280432" Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.262187 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" podStartSLOduration=3.004822957 podStartE2EDuration="24.262161983s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.921702454 +0000 UTC m=+1027.593197011" lastFinishedPulling="2026-01-29 06:52:37.17904147 +0000 UTC m=+1048.850536037" observedRunningTime="2026-01-29 06:52:38.258701983 +0000 UTC m=+1049.930196600" watchObservedRunningTime="2026-01-29 06:52:38.262161983 +0000 UTC m=+1049.933656580" Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.318048 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" podStartSLOduration=3.110237638 podStartE2EDuration="24.318030706s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.910172504 +0000 UTC m=+1027.581667061" lastFinishedPulling="2026-01-29 06:52:37.117965542 +0000 UTC m=+1048.789460129" observedRunningTime="2026-01-29 06:52:38.270760506 +0000 UTC m=+1049.942255103" watchObservedRunningTime="2026-01-29 06:52:38.318030706 +0000 UTC m=+1049.989525263" Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.319990 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" podStartSLOduration=2.9498926389999998 podStartE2EDuration="24.319984387s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.75504558 +0000 UTC m=+1027.426540127" lastFinishedPulling="2026-01-29 06:52:37.125137318 +0000 UTC m=+1048.796631875" observedRunningTime="2026-01-29 06:52:38.302271746 +0000 UTC m=+1049.973766313" watchObservedRunningTime="2026-01-29 06:52:38.319984387 +0000 UTC m=+1049.991478944" Jan 29 06:52:38 crc kubenswrapper[4861]: I0129 06:52:38.338537 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-t7725" podStartSLOduration=3.081007078 podStartE2EDuration="24.338521289s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 
06:52:15.922343701 +0000 UTC m=+1027.593838258" lastFinishedPulling="2026-01-29 06:52:37.179857882 +0000 UTC m=+1048.851352469" observedRunningTime="2026-01-29 06:52:38.329919735 +0000 UTC m=+1050.001414302" watchObservedRunningTime="2026-01-29 06:52:38.338521289 +0000 UTC m=+1050.010015846" Jan 29 06:52:39 crc kubenswrapper[4861]: I0129 06:52:39.121859 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 06:52:40 crc kubenswrapper[4861]: I0129 06:52:40.240393 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" event={"ID":"444df6a8-8c8f-4f2f-8092-1dd392f6eed1","Type":"ContainerStarted","Data":"0b8dee21a46179a01906619c5b198afb9b7f5f309ebda7f9610b5fad3f1c5425"} Jan 29 06:52:40 crc kubenswrapper[4861]: I0129 06:52:40.242357 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" Jan 29 06:52:42 crc kubenswrapper[4861]: I0129 06:52:42.141708 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" podStartSLOduration=4.234957074 podStartE2EDuration="28.14168715s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.661661441 +0000 UTC m=+1027.333155998" lastFinishedPulling="2026-01-29 06:52:39.568391497 +0000 UTC m=+1051.239886074" observedRunningTime="2026-01-29 06:52:40.263430686 +0000 UTC m=+1051.934925253" watchObservedRunningTime="2026-01-29 06:52:42.14168715 +0000 UTC m=+1053.813181747" Jan 29 06:52:43 crc kubenswrapper[4861]: I0129 06:52:43.266636 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4" event={"ID":"4ffdee81-542f-424a-b4ed-0db4b0ad2409","Type":"ContainerStarted","Data":"2de9a74f0e8b4797c11b93fb51d5c84729aeb62467bf2421a454aada798a4bd9"} Jan 29 06:52:43 crc kubenswrapper[4861]: I0129 06:52:43.267252 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4" Jan 29 06:52:43 crc kubenswrapper[4861]: I0129 06:52:43.291925 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4" podStartSLOduration=2.39020386 podStartE2EDuration="29.291898716s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.634036802 +0000 UTC m=+1027.305531359" lastFinishedPulling="2026-01-29 06:52:42.535731658 +0000 UTC m=+1054.207226215" observedRunningTime="2026-01-29 06:52:43.285362866 +0000 UTC m=+1054.956857463" watchObservedRunningTime="2026-01-29 06:52:43.291898716 +0000 UTC m=+1054.963393293" Jan 29 06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.252159 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-s68hj" Jan 29 06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.259511 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-tx9q6" Jan 29 06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.361971 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-7gs4c" Jan 29 
06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.378383 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-dw8n5" Jan 29 06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.390031 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-25p9x" Jan 29 06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.450725 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-ps8hr" Jan 29 06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.521866 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-txmj4" Jan 29 06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.539380 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-v4qvf" Jan 29 06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.558097 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-7hqbq" Jan 29 06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.673790 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-7cw4w" Jan 29 06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.706928 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-p8vt7" Jan 29 06:52:44 crc kubenswrapper[4861]: I0129 06:52:44.886461 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-wwnvz" Jan 29 06:52:45 crc kubenswrapper[4861]: I0129 06:52:45.034099 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-564965969-xckfp" Jan 29 06:52:45 crc kubenswrapper[4861]: I0129 06:52:45.039585 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-rw2h2" Jan 29 06:52:45 crc kubenswrapper[4861]: I0129 06:52:45.285003 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j" event={"ID":"6bbf867e-a436-4e97-aaa5-a77f4ab796ee","Type":"ContainerStarted","Data":"ffbcfc4553d05c63ede1dddb4b76bc79947e87dce9bd2e3fd38c33cd0543ba31"} Jan 29 06:52:45 crc kubenswrapper[4861]: I0129 06:52:45.285373 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j" Jan 29 06:52:45 crc kubenswrapper[4861]: I0129 06:52:45.306872 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j" podStartSLOduration=2.494062122 podStartE2EDuration="31.306849126s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.742530074 +0000 UTC m=+1027.414024631" lastFinishedPulling="2026-01-29 06:52:44.555317068 +0000 UTC m=+1056.226811635" observedRunningTime="2026-01-29 06:52:45.304467764 +0000 UTC m=+1056.975962331" watchObservedRunningTime="2026-01-29 06:52:45.306849126 +0000 UTC 
m=+1056.978343713" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.065874 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: \"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.076301 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5-cert\") pod \"infra-operator-controller-manager-79955696d6-p2vh6\" (UID: \"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.294483 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj" event={"ID":"5d6f19fb-11f0-4854-8590-b97ecb2e2ab7","Type":"ContainerStarted","Data":"febeedd445888a2d183a21b913d79ff13d54505acc0b4c11532f573db26e4436"} Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.295611 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.297923 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9" event={"ID":"4c9a63c6-aa31-43cc-8b09-34a877dc2957","Type":"ContainerStarted","Data":"2a912803c245333e66fe75b6f9a2d849ba0b273c12812a5df09c5b8ccd0fc44a"} Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.298565 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.302634 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.330802 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj" podStartSLOduration=2.491126355 podStartE2EDuration="32.330779188s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.768963012 +0000 UTC m=+1027.440457569" lastFinishedPulling="2026-01-29 06:52:45.608615805 +0000 UTC m=+1057.280110402" observedRunningTime="2026-01-29 06:52:46.321046965 +0000 UTC m=+1057.992541552" watchObservedRunningTime="2026-01-29 06:52:46.330779188 +0000 UTC m=+1058.002273765" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.347630 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9" podStartSLOduration=2.483847785 podStartE2EDuration="32.347606685s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.743797167 +0000 UTC m=+1027.415291724" lastFinishedPulling="2026-01-29 06:52:45.607556057 +0000 UTC m=+1057.279050624" observedRunningTime="2026-01-29 06:52:46.339047243 +0000 UTC m=+1058.010541860" watchObservedRunningTime="2026-01-29 06:52:46.347606685 +0000 UTC m=+1058.019101252" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.371820 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.377242 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48-cert\") pod \"openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw\" (UID: \"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.624380 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.761551 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6"] Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.777053 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.777175 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.783619 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-webhook-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.790244 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8f8a9499-a68a-4797-9801-a070bff21b9f-metrics-certs\") pod \"openstack-operator-controller-manager-7b54f464f6-95nrq\" (UID: \"8f8a9499-a68a-4797-9801-a070bff21b9f\") " pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:46 crc kubenswrapper[4861]: I0129 06:52:46.949130 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:47 crc kubenswrapper[4861]: I0129 06:52:47.112326 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw"] Jan 29 06:52:47 crc kubenswrapper[4861]: I0129 06:52:47.304897 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" event={"ID":"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5","Type":"ContainerStarted","Data":"40f9cfff7deba45cc2f2db2c291437de938471c4e8a90ab91049e773c78b3380"} Jan 29 06:52:47 crc kubenswrapper[4861]: I0129 06:52:47.306145 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh" event={"ID":"45beb56d-51b6-4737-b0f7-cd7db1d86942","Type":"ContainerStarted","Data":"d14a6f0fbc5acf8de1ec97650f919522ebcd65e85d4b953d950154a2feb6ac75"} Jan 29 06:52:47 crc kubenswrapper[4861]: I0129 06:52:47.306453 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh" Jan 29 06:52:47 crc kubenswrapper[4861]: I0129 06:52:47.307548 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" event={"ID":"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48","Type":"ContainerStarted","Data":"62e4839ad8c106cccc7396028550a76a54c86abac9fd0d9b291bf50f8d064491"} Jan 29 06:52:47 crc kubenswrapper[4861]: I0129 06:52:47.330137 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh" podStartSLOduration=2.433062535 podStartE2EDuration="33.330114951s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:15.652644036 +0000 UTC m=+1027.324138593" lastFinishedPulling="2026-01-29 06:52:46.549696452 +0000 UTC m=+1058.221191009" observedRunningTime="2026-01-29 06:52:47.32315855 +0000 UTC m=+1058.994653167" watchObservedRunningTime="2026-01-29 06:52:47.330114951 +0000 UTC m=+1059.001609548" Jan 29 06:52:47 crc kubenswrapper[4861]: I0129 06:52:47.480634 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq"] Jan 29 06:52:47 crc kubenswrapper[4861]: W0129 06:52:47.491567 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f8a9499_a68a_4797_9801_a070bff21b9f.slice/crio-32ca5e05fa3162966fdc62076e6ef3f6a5d9162fe9ba687c3c7b90b187008df5 WatchSource:0}: Error finding container 32ca5e05fa3162966fdc62076e6ef3f6a5d9162fe9ba687c3c7b90b187008df5: Status 404 returned error can't find the container with id 32ca5e05fa3162966fdc62076e6ef3f6a5d9162fe9ba687c3c7b90b187008df5 Jan 29 06:52:48 crc kubenswrapper[4861]: I0129 06:52:48.334535 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" event={"ID":"8f8a9499-a68a-4797-9801-a070bff21b9f","Type":"ContainerStarted","Data":"ac0e9aac07525ece51969afaafe1dd47333b3b5e0961770ebb6f0a2ca5a17291"} Jan 29 06:52:48 crc kubenswrapper[4861]: I0129 06:52:48.334921 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" 
event={"ID":"8f8a9499-a68a-4797-9801-a070bff21b9f","Type":"ContainerStarted","Data":"32ca5e05fa3162966fdc62076e6ef3f6a5d9162fe9ba687c3c7b90b187008df5"} Jan 29 06:52:48 crc kubenswrapper[4861]: I0129 06:52:48.375574 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" podStartSLOduration=34.375555803 podStartE2EDuration="34.375555803s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:52:48.368436378 +0000 UTC m=+1060.039931015" watchObservedRunningTime="2026-01-29 06:52:48.375555803 +0000 UTC m=+1060.047050360" Jan 29 06:52:49 crc kubenswrapper[4861]: I0129 06:52:49.343771 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" event={"ID":"b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5","Type":"ContainerStarted","Data":"f2dd7f7f455029d76933c955db4f4c73ac73fe004b57a0f00bda6d8dea3539f7"} Jan 29 06:52:49 crc kubenswrapper[4861]: I0129 06:52:49.344279 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq" Jan 29 06:52:50 crc kubenswrapper[4861]: I0129 06:52:50.354740 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" event={"ID":"9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48","Type":"ContainerStarted","Data":"18a1f4599eaa74b416076ba252de6044c58809adb1f1592a6c1c5c96c12ab409"} Jan 29 06:52:50 crc kubenswrapper[4861]: I0129 06:52:50.355130 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" Jan 29 06:52:50 crc kubenswrapper[4861]: I0129 06:52:50.417758 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6" podStartSLOduration=35.264872593 podStartE2EDuration="37.41772221s" podCreationTimestamp="2026-01-29 06:52:13 +0000 UTC" firstStartedPulling="2026-01-29 06:52:46.76993689 +0000 UTC m=+1058.441431457" lastFinishedPulling="2026-01-29 06:52:48.922786517 +0000 UTC m=+1060.594281074" observedRunningTime="2026-01-29 06:52:49.366093537 +0000 UTC m=+1061.037588114" watchObservedRunningTime="2026-01-29 06:52:50.41772221 +0000 UTC m=+1062.089216807" Jan 29 06:52:50 crc kubenswrapper[4861]: I0129 06:52:50.423913 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" podStartSLOduration=33.702931468 podStartE2EDuration="36.42389087s" podCreationTimestamp="2026-01-29 06:52:14 +0000 UTC" firstStartedPulling="2026-01-29 06:52:47.128820005 +0000 UTC m=+1058.800314562" lastFinishedPulling="2026-01-29 06:52:49.849779407 +0000 UTC m=+1061.521273964" observedRunningTime="2026-01-29 06:52:50.410699487 +0000 UTC m=+1062.082194104" watchObservedRunningTime="2026-01-29 06:52:50.42389087 +0000 UTC m=+1062.095385487" Jan 29 06:52:51 crc kubenswrapper[4861]: I0129 06:52:51.363214 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw" Jan 29 06:52:54 crc kubenswrapper[4861]: I0129 06:52:54.642682 4861 kubelet.go:2542] "SyncLoop (probe)" 
Jan 29 06:52:54 crc kubenswrapper[4861]: I0129 06:52:54.642682 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-2tjv4"
Jan 29 06:52:54 crc kubenswrapper[4861]: I0129 06:52:54.651100 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-nszsj"
Jan 29 06:52:54 crc kubenswrapper[4861]: I0129 06:52:54.674328 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-xjbh9"
Jan 29 06:52:54 crc kubenswrapper[4861]: I0129 06:52:54.728690 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh"
Jan 29 06:52:54 crc kubenswrapper[4861]: I0129 06:52:54.816696 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-jpq7j"
Jan 29 06:52:56 crc kubenswrapper[4861]: I0129 06:52:56.314411 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-79955696d6-p2vh6"
Jan 29 06:52:56 crc kubenswrapper[4861]: I0129 06:52:56.634837 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw"
Jan 29 06:52:56 crc kubenswrapper[4861]: I0129 06:52:56.955937 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7b54f464f6-95nrq"
Jan 29 06:53:00 crc kubenswrapper[4861]: I0129 06:53:00.629600 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 06:53:00 crc kubenswrapper[4861]: I0129 06:53:00.629866 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
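The machine-config-daemon failure above is an HTTP liveness probe: a GET against http://127.0.0.1:8798/health where a transport error (here, connection refused) marks the probe failed. A sketch of that check's shape, illustrative only and not the kubelet prober's actual code; the 2xx/3xx success range is the usual HTTP-probe convention:

// probe.go -- the shape of the failing liveness check logged above.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func probe(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		// e.g. Get "http://127.0.0.1:8798/health": dial tcp 127.0.0.1:8798: connect: connection refused
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probe("http://127.0.0.1:8798/health"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}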
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.725906 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"]
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.727403 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.730180 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-hmtl5"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.730504 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.730521 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.737892 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"]
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.743898 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.788853 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9n4bv"]
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.789846 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-9n4bv"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.792423 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.809812 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9n4bv"]
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.885980 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svt2k\" (UniqueName: \"kubernetes.io/projected/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-kube-api-access-svt2k\") pod \"dnsmasq-dns-84bb9d8bd9-f9w6p\" (UID: \"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.886147 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-config\") pod \"dnsmasq-dns-84bb9d8bd9-f9w6p\" (UID: \"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.886177 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-config\") pod \"dnsmasq-dns-5f854695bc-9n4bv\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " pod="openstack/dnsmasq-dns-5f854695bc-9n4bv"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.886208 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zq4s\" (UniqueName: \"kubernetes.io/projected/edf10354-bc30-4c70-9104-81c2e081bcee-kube-api-access-6zq4s\") pod \"dnsmasq-dns-5f854695bc-9n4bv\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " pod="openstack/dnsmasq-dns-5f854695bc-9n4bv"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.886252 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-dns-svc\") pod \"dnsmasq-dns-5f854695bc-9n4bv\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " pod="openstack/dnsmasq-dns-5f854695bc-9n4bv"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.987654 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-config\") pod \"dnsmasq-dns-84bb9d8bd9-f9w6p\" (UID: \"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.987721 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-config\") pod \"dnsmasq-dns-5f854695bc-9n4bv\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " pod="openstack/dnsmasq-dns-5f854695bc-9n4bv"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.987767 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zq4s\" (UniqueName: \"kubernetes.io/projected/edf10354-bc30-4c70-9104-81c2e081bcee-kube-api-access-6zq4s\") pod \"dnsmasq-dns-5f854695bc-9n4bv\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " pod="openstack/dnsmasq-dns-5f854695bc-9n4bv"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.987887 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-dns-svc\") pod \"dnsmasq-dns-5f854695bc-9n4bv\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " pod="openstack/dnsmasq-dns-5f854695bc-9n4bv"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.987976 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svt2k\" (UniqueName: \"kubernetes.io/projected/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-kube-api-access-svt2k\") pod \"dnsmasq-dns-84bb9d8bd9-f9w6p\" (UID: \"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.989448 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-config\") pod \"dnsmasq-dns-5f854695bc-9n4bv\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " pod="openstack/dnsmasq-dns-5f854695bc-9n4bv"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.989641 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-dns-svc\") pod \"dnsmasq-dns-5f854695bc-9n4bv\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " pod="openstack/dnsmasq-dns-5f854695bc-9n4bv"
Jan 29 06:53:12 crc kubenswrapper[4861]: I0129 06:53:12.990456 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-config\") pod \"dnsmasq-dns-84bb9d8bd9-f9w6p\" (UID: \"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"
Jan 29 06:53:13 crc kubenswrapper[4861]: I0129 06:53:13.010565 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svt2k\" (UniqueName: \"kubernetes.io/projected/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-kube-api-access-svt2k\") pod \"dnsmasq-dns-84bb9d8bd9-f9w6p\" (UID: \"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"
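Every journal line here has the same shape: a syslog prefix from journald ("Jan 29 06:53:12 crc kubenswrapper[4861]:"), a klog header (severity letter plus MMDD, time, PID, source file:line), then a quoted message followed by key=value pairs. A rough Go tokenizer for lines of this shape; illustrative only, and it does not attempt to split the key=value payload, whose quoted values can contain escapes:

// klogparse.go -- split one journal-wrapped klog line into its parts.
package main

import (
	"fmt"
	"regexp"
)

// syslog-time host process[pid]: <sev><MMDD time> <pid> <file:line>] <payload>
var klogLine = regexp.MustCompile(
	`^(\w{3} +\d+ [\d:]+) (\S+) (\S+?)\[(\d+)\]: ([IWEF])(\d{4} [\d:.]+) +(\d+) ([\w./]+:\d+)\] (.*)$`)

func main() {
	line := `Jan 29 06:52:47 crc kubenswrapper[4861]: I0129 06:52:47.306453 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-2blwh"`
	m := klogLine.FindStringSubmatch(line)
	if m == nil {
		panic("no match")
	}
	fmt.Println("severity:", m[5]) // I / W / E / F
	fmt.Println("source:  ", m[8]) // kubelet.go:2542
	fmt.Println("payload: ", m[9]) // quoted message plus key=value pairs
}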
Jan 29 06:53:13 crc kubenswrapper[4861]: I0129 06:53:13.013934 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zq4s\" (UniqueName: \"kubernetes.io/projected/edf10354-bc30-4c70-9104-81c2e081bcee-kube-api-access-6zq4s\") pod \"dnsmasq-dns-5f854695bc-9n4bv\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " pod="openstack/dnsmasq-dns-5f854695bc-9n4bv"
Jan 29 06:53:13 crc kubenswrapper[4861]: I0129 06:53:13.044952 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"
Jan 29 06:53:13 crc kubenswrapper[4861]: I0129 06:53:13.108468 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-9n4bv"
Jan 29 06:53:13 crc kubenswrapper[4861]: I0129 06:53:13.494966 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9n4bv"]
Jan 29 06:53:13 crc kubenswrapper[4861]: I0129 06:53:13.548189 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-9n4bv" event={"ID":"edf10354-bc30-4c70-9104-81c2e081bcee","Type":"ContainerStarted","Data":"e8824d85e175394979b3feaeee35491324536ed364d34503af105e2f51195d39"}
Jan 29 06:53:13 crc kubenswrapper[4861]: I0129 06:53:13.553840 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"]
Jan 29 06:53:13 crc kubenswrapper[4861]: W0129 06:53:13.561392 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod330a83ea_3e4a_4ae5_9f14_7b3d0aa3aec0.slice/crio-1debd21e234e9d777f6a93a0a1997ebfc18fffd33f525f87addb633f2f9c67b8 WatchSource:0}: Error finding container 1debd21e234e9d777f6a93a0a1997ebfc18fffd33f525f87addb633f2f9c67b8: Status 404 returned error can't find the container with id 1debd21e234e9d777f6a93a0a1997ebfc18fffd33f525f87addb633f2f9c67b8
Jan 29 06:53:14 crc kubenswrapper[4861]: I0129 06:53:14.558137 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p" event={"ID":"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0","Type":"ContainerStarted","Data":"1debd21e234e9d777f6a93a0a1997ebfc18fffd33f525f87addb633f2f9c67b8"}
Jan 29 06:53:14 crc kubenswrapper[4861]: I0129 06:53:14.776562 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9n4bv"]
Jan 29 06:53:14 crc kubenswrapper[4861]: I0129 06:53:14.802550 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-c7cbb8f79-7dbcs"]
Jan 29 06:53:14 crc kubenswrapper[4861]: I0129 06:53:14.804005 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs"
Jan 29 06:53:14 crc kubenswrapper[4861]: I0129 06:53:14.813651 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c7cbb8f79-7dbcs"]
Jan 29 06:53:14 crc kubenswrapper[4861]: I0129 06:53:14.923837 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-config\") pod \"dnsmasq-dns-c7cbb8f79-7dbcs\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs"
Jan 29 06:53:14 crc kubenswrapper[4861]: I0129 06:53:14.923884 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-dns-svc\") pod \"dnsmasq-dns-c7cbb8f79-7dbcs\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs"
Jan 29 06:53:14 crc kubenswrapper[4861]: I0129 06:53:14.923930 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lrg2\" (UniqueName: \"kubernetes.io/projected/d442990e-f715-44bc-86d7-92259130a2f5-kube-api-access-8lrg2\") pod \"dnsmasq-dns-c7cbb8f79-7dbcs\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.025252 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-config\") pod \"dnsmasq-dns-c7cbb8f79-7dbcs\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.025318 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-dns-svc\") pod \"dnsmasq-dns-c7cbb8f79-7dbcs\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.025370 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lrg2\" (UniqueName: \"kubernetes.io/projected/d442990e-f715-44bc-86d7-92259130a2f5-kube-api-access-8lrg2\") pod \"dnsmasq-dns-c7cbb8f79-7dbcs\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.026491 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-config\") pod \"dnsmasq-dns-c7cbb8f79-7dbcs\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.027166 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-dns-svc\") pod \"dnsmasq-dns-c7cbb8f79-7dbcs\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.050674 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lrg2\" (UniqueName: \"kubernetes.io/projected/d442990e-f715-44bc-86d7-92259130a2f5-kube-api-access-8lrg2\") pod \"dnsmasq-dns-c7cbb8f79-7dbcs\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.132728 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.590168 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c7cbb8f79-7dbcs"]
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.665372 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"]
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.691494 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-fvf8f"]
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.693890 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.712051 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-fvf8f"]
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.836652 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-config\") pod \"dnsmasq-dns-95f5f6995-fvf8f\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " pod="openstack/dnsmasq-dns-95f5f6995-fvf8f"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.836713 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-dns-svc\") pod \"dnsmasq-dns-95f5f6995-fvf8f\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " pod="openstack/dnsmasq-dns-95f5f6995-fvf8f"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.836754 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnntw\" (UniqueName: \"kubernetes.io/projected/76403591-49dd-48f3-976f-34a9b6d7ba8a-kube-api-access-gnntw\") pod \"dnsmasq-dns-95f5f6995-fvf8f\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " pod="openstack/dnsmasq-dns-95f5f6995-fvf8f"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.928588 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.933636 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.938312 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.939612 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.939916 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.940259 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-knw2w"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.940344 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-config\") pod \"dnsmasq-dns-95f5f6995-fvf8f\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " pod="openstack/dnsmasq-dns-95f5f6995-fvf8f"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.940403 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-dns-svc\") pod \"dnsmasq-dns-95f5f6995-fvf8f\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " pod="openstack/dnsmasq-dns-95f5f6995-fvf8f"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.940444 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnntw\" (UniqueName: \"kubernetes.io/projected/76403591-49dd-48f3-976f-34a9b6d7ba8a-kube-api-access-gnntw\") pod \"dnsmasq-dns-95f5f6995-fvf8f\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " pod="openstack/dnsmasq-dns-95f5f6995-fvf8f"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.940813 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.941136 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.941434 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.942410 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-config\") pod \"dnsmasq-dns-95f5f6995-fvf8f\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " pod="openstack/dnsmasq-dns-95f5f6995-fvf8f"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.943206 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-dns-svc\") pod \"dnsmasq-dns-95f5f6995-fvf8f\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " pod="openstack/dnsmasq-dns-95f5f6995-fvf8f"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.971728 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnntw\" (UniqueName: \"kubernetes.io/projected/76403591-49dd-48f3-976f-34a9b6d7ba8a-kube-api-access-gnntw\") pod \"dnsmasq-dns-95f5f6995-fvf8f\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " pod="openstack/dnsmasq-dns-95f5f6995-fvf8f"
Jan 29 06:53:15 crc kubenswrapper[4861]: I0129 06:53:15.977067 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.023642 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.042394 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b8b1385-123a-4b60-af39-82d6492a65c2-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.042451 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.042487 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.042518 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b8b1385-123a-4b60-af39-82d6492a65c2-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.042571 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.042662 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.042754 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.042849 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.045099 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.045143 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.045230 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqc5q\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-kube-api-access-bqc5q\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.146856 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.147210 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.147233 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqc5q\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-kube-api-access-bqc5q\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.147275 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b8b1385-123a-4b60-af39-82d6492a65c2-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.147293 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.147316 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.147341 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b8b1385-123a-4b60-af39-82d6492a65c2-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.147365 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.147381 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.147412 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.147449 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.148213 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.148475 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.148486 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.148962 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.149536 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.152113 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.152359 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b8b1385-123a-4b60-af39-82d6492a65c2-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.153847 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.155790 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.160148 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b8b1385-123a-4b60-af39-82d6492a65c2-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.166948 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqc5q\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-kube-api-access-bqc5q\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.184278 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.255102 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.516039 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-fvf8f"]
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.587799 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.590890 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs" event={"ID":"d442990e-f715-44bc-86d7-92259130a2f5","Type":"ContainerStarted","Data":"f69d3afff9418f85aaf40d794bc306156ba92eb45dd41c13064d1aea40bedc65"}
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.594154 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f" event={"ID":"76403591-49dd-48f3-976f-34a9b6d7ba8a","Type":"ContainerStarted","Data":"1099740a029dfcee3f8ce345007a41584cfd2a73bcfb51d527d96d383fed1517"}
Jan 29 06:53:16 crc kubenswrapper[4861]: W0129 06:53:16.594212 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b8b1385_123a_4b60_af39_82d6492a65c2.slice/crio-67a1475dfbada9b85e2a0e8ee90f3cf79235d0ae9ebb592aa8e44eea88f5924a WatchSource:0}: Error finding container 67a1475dfbada9b85e2a0e8ee90f3cf79235d0ae9ebb592aa8e44eea88f5924a: Status 404 returned error can't find the container with id 67a1475dfbada9b85e2a0e8ee90f3cf79235d0ae9ebb592aa8e44eea88f5924a
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.829798 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.831525 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.833821 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.834680 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.834855 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.834995 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-vw2zk"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.835865 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.835910 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.836415 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.838310 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.961458 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.961530 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.961875 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.961924 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5966cedc-8ab5-4390-906b-c5ac39333e09-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.962030 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.962060 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5966cedc-8ab5-4390-906b-c5ac39333e09-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.962105 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.962134 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.962278 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.962311 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:16 crc kubenswrapper[4861]: I0129 06:53:16.962351 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvspp\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-kube-api-access-mvspp\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.063909 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.063961 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvspp\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-kube-api-access-mvspp\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.064006 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.064033 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.064057 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.064104 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5966cedc-8ab5-4390-906b-c5ac39333e09-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.064151 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.064180 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5966cedc-8ab5-4390-906b-c5ac39333e09-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.064207 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.064237 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.064255 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.064806 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.065250 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.065487 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.066163 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.067092 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.074118 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.074587 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.075327 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5966cedc-8ab5-4390-906b-c5ac39333e09-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.077341 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5966cedc-8ab5-4390-906b-c5ac39333e09-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.084782 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvspp\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-kube-api-access-mvspp\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.110892 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.126162 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") " pod="openstack/rabbitmq-server-0"
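The rabbitmq blocks above show the kubelet's per-volume mount lifecycle in order: VerifyControllerAttachedVolume (reconciler_common.go:245), MountVolume started (reconciler_common.go:218), a MountVolume.MountDevice step reported only for the local PVs (local-storage12-crc at /mnt/openstack/pv12, local-storage07-crc at /mnt/openstack/pv07), and finally MountVolume.SetUp succeeded for every volume. A small, hypothetical helper for tracing one volume through those phases in a saved log; the phase strings are taken verbatim from the entries above:

// volumetrace.go -- usage (assumed): go run volumetrace.go < kubelet.log
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	const volume = "local-storage07-crc" // volume name to trace
	phases := []string{
		"operationExecutor.VerifyControllerAttachedVolume started",
		"operationExecutor.MountVolume started",
		"MountVolume.MountDevice succeeded", // device-level mount; seen for local/block PVs
		"MountVolume.SetUp succeeded",       // per-pod setup of the volume
	}
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Buffer(make([]byte, 1024*1024), 1024*1024) // entries can be long
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.Contains(line, volume) {
			continue
		}
		for _, p := range phases {
			if strings.Contains(line, p) {
				fmt.Println(p)
			}
		}
	}
}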
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.151298 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 06:53:17 crc kubenswrapper[4861]: I0129 06:53:17.616424 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b8b1385-123a-4b60-af39-82d6492a65c2","Type":"ContainerStarted","Data":"67a1475dfbada9b85e2a0e8ee90f3cf79235d0ae9ebb592aa8e44eea88f5924a"}
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.314732 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.315831 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.321502 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-vxbxf"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.322525 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.323289 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.323494 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.327121 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.327910 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.384736 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.384784 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.384810 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-default\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.384827 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.384891 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-kolla-config\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.384932 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.384958 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.384978 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk478\" (UniqueName: \"kubernetes.io/projected/8c370e6a-40e9-4055-857e-c8357c904c8e-kube-api-access-nk478\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.486191 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.486251 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.486270 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk478\" (UniqueName: \"kubernetes.io/projected/8c370e6a-40e9-4055-857e-c8357c904c8e-kube-api-access-nk478\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.486370 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.486437 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.486465 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-default\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.486479 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.486520 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-kolla-config\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.487554 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.490315 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-kolla-config\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.490481 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.491239 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.494860 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-default\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.496507 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.501206 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.505464 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk478\" (UniqueName: \"kubernetes.io/projected/8c370e6a-40e9-4055-857e-c8357c904c8e-kube-api-access-nk478\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.512920 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " pod="openstack/openstack-galera-0"
Jan 29 06:53:18 crc kubenswrapper[4861]: I0129 06:53:18.653260 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.673800 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.681619 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.685043 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-dt4gs"
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.685245 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.686036 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.686270 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.698980 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.812986 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0"
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.813055 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cf4z\" (UniqueName: \"kubernetes.io/projected/a0032cf8-4c45-4a4c-927d-686adba85ab1-kube-api-access-5cf4z\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0"
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.813165 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0"
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.813203 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0"
Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.813304 4861 reconciler_common.go:245]
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.813359 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.813406 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.813472 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.915500 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.915566 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.915591 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.915639 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.915682 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.915702 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-5cf4z\" (UniqueName: \"kubernetes.io/projected/a0032cf8-4c45-4a4c-927d-686adba85ab1-kube-api-access-5cf4z\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.915750 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.915771 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.916148 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.916891 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.918720 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.919205 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.919406 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.921647 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.925164 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.945786 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cf4z\" (UniqueName: \"kubernetes.io/projected/a0032cf8-4c45-4a4c-927d-686adba85ab1-kube-api-access-5cf4z\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:19 crc kubenswrapper[4861]: I0129 06:53:19.957313 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.014583 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.172776 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.174168 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.197906 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.198366 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-ks54j" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.198678 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.198717 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.220344 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-memcached-tls-certs\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.220429 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kolla-config\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.220555 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-config-data\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.220602 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4z6g\" (UniqueName: \"kubernetes.io/projected/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kube-api-access-d4z6g\") pod \"memcached-0\" (UID: 
\"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.220638 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-combined-ca-bundle\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.322904 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-config-data\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.323901 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-config-data\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.324579 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4z6g\" (UniqueName: \"kubernetes.io/projected/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kube-api-access-d4z6g\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.324644 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-combined-ca-bundle\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.324682 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-memcached-tls-certs\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.325472 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kolla-config\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.330514 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-combined-ca-bundle\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.330568 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-memcached-tls-certs\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.330868 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kolla-config\") pod \"memcached-0\" 
(UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.372636 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4z6g\" (UniqueName: \"kubernetes.io/projected/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kube-api-access-d4z6g\") pod \"memcached-0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " pod="openstack/memcached-0" Jan 29 06:53:20 crc kubenswrapper[4861]: I0129 06:53:20.499305 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 29 06:53:22 crc kubenswrapper[4861]: I0129 06:53:22.176099 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 06:53:22 crc kubenswrapper[4861]: I0129 06:53:22.178554 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 06:53:22 crc kubenswrapper[4861]: I0129 06:53:22.182553 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-djnzp" Jan 29 06:53:22 crc kubenswrapper[4861]: I0129 06:53:22.187308 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4p8q\" (UniqueName: \"kubernetes.io/projected/f091de47-47eb-4d84-92b8-c478bc6a7af7-kube-api-access-c4p8q\") pod \"kube-state-metrics-0\" (UID: \"f091de47-47eb-4d84-92b8-c478bc6a7af7\") " pod="openstack/kube-state-metrics-0" Jan 29 06:53:22 crc kubenswrapper[4861]: I0129 06:53:22.198310 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 06:53:22 crc kubenswrapper[4861]: I0129 06:53:22.287760 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4p8q\" (UniqueName: \"kubernetes.io/projected/f091de47-47eb-4d84-92b8-c478bc6a7af7-kube-api-access-c4p8q\") pod \"kube-state-metrics-0\" (UID: \"f091de47-47eb-4d84-92b8-c478bc6a7af7\") " pod="openstack/kube-state-metrics-0" Jan 29 06:53:22 crc kubenswrapper[4861]: I0129 06:53:22.306875 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4p8q\" (UniqueName: \"kubernetes.io/projected/f091de47-47eb-4d84-92b8-c478bc6a7af7-kube-api-access-c4p8q\") pod \"kube-state-metrics-0\" (UID: \"f091de47-47eb-4d84-92b8-c478bc6a7af7\") " pod="openstack/kube-state-metrics-0" Jan 29 06:53:22 crc kubenswrapper[4861]: I0129 06:53:22.514863 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.594436 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-z5wvn"] Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.596578 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.598645 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-8pg2k" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.599654 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.600004 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.606755 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-6n7w9"] Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.609051 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.624658 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-z5wvn"] Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.631400 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-6n7w9"] Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.651872 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-lib\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.652247 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-ovn-controller-tls-certs\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.652531 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shqvv\" (UniqueName: \"kubernetes.io/projected/5b52afb6-32de-4f14-9663-adeec08b4fad-kube-api-access-shqvv\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.652706 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-combined-ca-bundle\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.652896 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-log\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.653125 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/633f63c1-539f-4477-8aae-d6731a514280-scripts\") pod \"ovn-controller-ovs-6n7w9\" (UID: 
\"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.653404 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8nbk\" (UniqueName: \"kubernetes.io/projected/633f63c1-539f-4477-8aae-d6731a514280-kube-api-access-l8nbk\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.653585 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b52afb6-32de-4f14-9663-adeec08b4fad-scripts\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.653733 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-log-ovn\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.653864 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run-ovn\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.654024 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-etc-ovs\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.654227 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-run\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.654398 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.755627 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b52afb6-32de-4f14-9663-adeec08b4fad-scripts\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.755866 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-log-ovn\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc 
kubenswrapper[4861]: I0129 06:53:25.755908 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run-ovn\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.755926 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-etc-ovs\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.755946 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-run\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.755963 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.756000 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-lib\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.756021 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-ovn-controller-tls-certs\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.756091 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shqvv\" (UniqueName: \"kubernetes.io/projected/5b52afb6-32de-4f14-9663-adeec08b4fad-kube-api-access-shqvv\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.756107 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-combined-ca-bundle\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.756154 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-log\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.756172 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/633f63c1-539f-4477-8aae-d6731a514280-scripts\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.756225 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8nbk\" (UniqueName: \"kubernetes.io/projected/633f63c1-539f-4477-8aae-d6731a514280-kube-api-access-l8nbk\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.756616 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run-ovn\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.756777 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-etc-ovs\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.756799 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-run\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.757101 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-lib\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.757199 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.757598 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-log-ovn\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.757703 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-log\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.758946 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b52afb6-32de-4f14-9663-adeec08b4fad-scripts\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.759720 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/633f63c1-539f-4477-8aae-d6731a514280-scripts\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.763204 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-ovn-controller-tls-certs\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.771748 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-combined-ca-bundle\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.774560 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shqvv\" (UniqueName: \"kubernetes.io/projected/5b52afb6-32de-4f14-9663-adeec08b4fad-kube-api-access-shqvv\") pod \"ovn-controller-z5wvn\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") " pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.777338 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8nbk\" (UniqueName: \"kubernetes.io/projected/633f63c1-539f-4477-8aae-d6731a514280-kube-api-access-l8nbk\") pod \"ovn-controller-ovs-6n7w9\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.925368 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:25 crc kubenswrapper[4861]: I0129 06:53:25.939684 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.299956 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.301995 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.304762 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.305130 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.307181 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-jxctz" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.307332 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.307449 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.317445 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.384795 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-config\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.384925 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.384984 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.385130 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.385175 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dr7zk\" (UniqueName: \"kubernetes.io/projected/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-kube-api-access-dr7zk\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.385209 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.385244 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.385325 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.486452 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.486566 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-config\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.486636 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.486671 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.486750 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.486781 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dr7zk\" (UniqueName: \"kubernetes.io/projected/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-kube-api-access-dr7zk\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.486800 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.486828 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 
06:53:27.487552 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-config\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.487788 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.488224 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.489097 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.496693 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.496996 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.500846 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.513984 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dr7zk\" (UniqueName: \"kubernetes.io/projected/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-kube-api-access-dr7zk\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.517288 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:27 crc kubenswrapper[4861]: I0129 06:53:27.654108 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.561969 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.563742 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.566714 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.566772 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.566869 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-qw8q4" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.569048 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.582763 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.621410 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nngmc\" (UniqueName: \"kubernetes.io/projected/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-kube-api-access-nngmc\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.621484 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.621512 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.621559 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.621741 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.621798 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-config\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 
06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.621863 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.621938 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.723502 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.723559 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.723595 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.723632 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.723653 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-config\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.723681 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.723709 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.723765 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nngmc\" (UniqueName: \"kubernetes.io/projected/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-kube-api-access-nngmc\") pod 
\"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.724364 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.725191 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.725645 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-config\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.725702 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.730887 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.731579 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.732927 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.743352 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nngmc\" (UniqueName: \"kubernetes.io/projected/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-kube-api-access-nngmc\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.744438 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-sb-0\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") " pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:29 crc kubenswrapper[4861]: I0129 06:53:29.895646 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:30 crc kubenswrapper[4861]: E0129 06:53:30.042782 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 29 06:53:30 crc kubenswrapper[4861]: E0129 06:53:30.043250 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-svt2k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-84bb9d8bd9-f9w6p_openstack(330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 06:53:30 crc kubenswrapper[4861]: E0129 06:53:30.046392 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p" podUID="330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0" Jan 29 06:53:30 crc kubenswrapper[4861]: E0129 06:53:30.054433 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 29 06:53:30 crc kubenswrapper[4861]: E0129 06:53:30.054538 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8lrg2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-c7cbb8f79-7dbcs_openstack(d442990e-f715-44bc-86d7-92259130a2f5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 06:53:30 crc kubenswrapper[4861]: E0129 06:53:30.055694 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs" podUID="d442990e-f715-44bc-86d7-92259130a2f5" Jan 29 06:53:30 crc kubenswrapper[4861]: E0129 06:53:30.093032 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 29 06:53:30 crc kubenswrapper[4861]: E0129 06:53:30.093227 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed 
--no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6zq4s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5f854695bc-9n4bv_openstack(edf10354-bc30-4c70-9104-81c2e081bcee): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 06:53:30 crc kubenswrapper[4861]: E0129 06:53:30.094448 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5f854695bc-9n4bv" podUID="edf10354-bc30-4c70-9104-81c2e081bcee" Jan 29 06:53:30 crc kubenswrapper[4861]: I0129 06:53:30.427471 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 06:53:30 crc kubenswrapper[4861]: I0129 06:53:30.629710 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:53:30 crc kubenswrapper[4861]: I0129 06:53:30.630006 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:53:30 crc kubenswrapper[4861]: E0129 06:53:30.754714 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33\\\"\"" 
pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs" podUID="d442990e-f715-44bc-86d7-92259130a2f5" Jan 29 06:53:31 crc kubenswrapper[4861]: W0129 06:53:31.270203 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c370e6a_40e9_4055_857e_c8357c904c8e.slice/crio-d319f43b0a3b9bbe7170779bb9517e16cd65501660e2e82f024feec0415781ba WatchSource:0}: Error finding container d319f43b0a3b9bbe7170779bb9517e16cd65501660e2e82f024feec0415781ba: Status 404 returned error can't find the container with id d319f43b0a3b9bbe7170779bb9517e16cd65501660e2e82f024feec0415781ba Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.353588 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.395509 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-9n4bv" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.554167 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-dns-svc\") pod \"edf10354-bc30-4c70-9104-81c2e081bcee\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.554221 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-config\") pod \"edf10354-bc30-4c70-9104-81c2e081bcee\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.554261 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svt2k\" (UniqueName: \"kubernetes.io/projected/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-kube-api-access-svt2k\") pod \"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0\" (UID: \"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0\") " Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.554281 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zq4s\" (UniqueName: \"kubernetes.io/projected/edf10354-bc30-4c70-9104-81c2e081bcee-kube-api-access-6zq4s\") pod \"edf10354-bc30-4c70-9104-81c2e081bcee\" (UID: \"edf10354-bc30-4c70-9104-81c2e081bcee\") " Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.554344 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-config\") pod \"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0\" (UID: \"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0\") " Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.555107 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-config" (OuterVolumeSpecName: "config") pod "330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0" (UID: "330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.555509 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "edf10354-bc30-4c70-9104-81c2e081bcee" (UID: "edf10354-bc30-4c70-9104-81c2e081bcee"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.555889 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-config" (OuterVolumeSpecName: "config") pod "edf10354-bc30-4c70-9104-81c2e081bcee" (UID: "edf10354-bc30-4c70-9104-81c2e081bcee"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.562716 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-kube-api-access-svt2k" (OuterVolumeSpecName: "kube-api-access-svt2k") pod "330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0" (UID: "330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0"). InnerVolumeSpecName "kube-api-access-svt2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.562923 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edf10354-bc30-4c70-9104-81c2e081bcee-kube-api-access-6zq4s" (OuterVolumeSpecName: "kube-api-access-6zq4s") pod "edf10354-bc30-4c70-9104-81c2e081bcee" (UID: "edf10354-bc30-4c70-9104-81c2e081bcee"). InnerVolumeSpecName "kube-api-access-6zq4s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.656823 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.656855 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edf10354-bc30-4c70-9104-81c2e081bcee-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.656865 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svt2k\" (UniqueName: \"kubernetes.io/projected/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-kube-api-access-svt2k\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.656875 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zq4s\" (UniqueName: \"kubernetes.io/projected/edf10354-bc30-4c70-9104-81c2e081bcee-kube-api-access-6zq4s\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.656884 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.696475 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.760024 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8c370e6a-40e9-4055-857e-c8357c904c8e","Type":"ContainerStarted","Data":"d319f43b0a3b9bbe7170779bb9517e16cd65501660e2e82f024feec0415781ba"} Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.762354 4861 generic.go:334] "Generic (PLEG): container finished" podID="76403591-49dd-48f3-976f-34a9b6d7ba8a" containerID="71586b55f6fbb29ba2e6038dd24e6ed3fa5ed8e5a0decb745ccf4808995a47c4" exitCode=0 Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.762467 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-95f5f6995-fvf8f" event={"ID":"76403591-49dd-48f3-976f-34a9b6d7ba8a","Type":"ContainerDied","Data":"71586b55f6fbb29ba2e6038dd24e6ed3fa5ed8e5a0decb745ccf4808995a47c4"} Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.768881 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-9n4bv" event={"ID":"edf10354-bc30-4c70-9104-81c2e081bcee","Type":"ContainerDied","Data":"e8824d85e175394979b3feaeee35491324536ed364d34503af105e2f51195d39"} Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.768975 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-9n4bv" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.773343 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p" event={"ID":"330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0","Type":"ContainerDied","Data":"1debd21e234e9d777f6a93a0a1997ebfc18fffd33f525f87addb633f2f9c67b8"} Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.773424 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-f9w6p" Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.865249 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"] Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.892017 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-f9w6p"] Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.910970 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9n4bv"] Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.924752 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-9n4bv"] Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.932531 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.938664 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 29 06:53:31 crc kubenswrapper[4861]: I0129 06:53:31.943698 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 06:53:31 crc kubenswrapper[4861]: W0129 06:53:31.951405 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda0032cf8_4c45_4a4c_927d_686adba85ab1.slice/crio-d5fb90236084102c2f246f84c52584717d4f7ade6757e8ede8b718d527e39130 WatchSource:0}: Error finding container d5fb90236084102c2f246f84c52584717d4f7ade6757e8ede8b718d527e39130: Status 404 returned error can't find the container with id d5fb90236084102c2f246f84c52584717d4f7ade6757e8ede8b718d527e39130 Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.021337 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.053806 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-z5wvn"] Jan 29 06:53:32 crc kubenswrapper[4861]: W0129 06:53:32.086469 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f8ce486_c345_41aa_b641_b7c4ef27ecfe.slice/crio-2d43122e89fc2f3b86af7df1e43cb42866a149cfc1bda0377adcec5c856c1fd4 WatchSource:0}: Error finding container 
2d43122e89fc2f3b86af7df1e43cb42866a149cfc1bda0377adcec5c856c1fd4: Status 404 returned error can't find the container with id 2d43122e89fc2f3b86af7df1e43cb42866a149cfc1bda0377adcec5c856c1fd4 Jan 29 06:53:32 crc kubenswrapper[4861]: W0129 06:53:32.088322 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b52afb6_32de_4f14_9663_adeec08b4fad.slice/crio-0b05369c08b4c72eb185ac082b775e15c7b7da6052ceb4cfe24070e9515aff73 WatchSource:0}: Error finding container 0b05369c08b4c72eb185ac082b775e15c7b7da6052ceb4cfe24070e9515aff73: Status 404 returned error can't find the container with id 0b05369c08b4c72eb185ac082b775e15c7b7da6052ceb4cfe24070e9515aff73 Jan 29 06:53:32 crc kubenswrapper[4861]: W0129 06:53:32.114584 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1432e17_a4e2_4cde_a3d7_89eddf9973e1.slice/crio-93e991e2aeb051baab726270af043589852809398efd615d37fbe49ad5ff6ce4 WatchSource:0}: Error finding container 93e991e2aeb051baab726270af043589852809398efd615d37fbe49ad5ff6ce4: Status 404 returned error can't find the container with id 93e991e2aeb051baab726270af043589852809398efd615d37fbe49ad5ff6ce4 Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.116353 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.796703 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b8b1385-123a-4b60-af39-82d6492a65c2","Type":"ContainerStarted","Data":"2808f05e16652e14e080ca41ff8920b6abdf36850a94e302e37f9f0b96a4b421"} Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.800614 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"a1432e17-a4e2-4cde-a3d7-89eddf9973e1","Type":"ContainerStarted","Data":"93e991e2aeb051baab726270af043589852809398efd615d37fbe49ad5ff6ce4"} Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.802201 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a0032cf8-4c45-4a4c-927d-686adba85ab1","Type":"ContainerStarted","Data":"d5fb90236084102c2f246f84c52584717d4f7ade6757e8ede8b718d527e39130"} Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.808316 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5966cedc-8ab5-4390-906b-c5ac39333e09","Type":"ContainerStarted","Data":"f7185532535e4d49a2dc75b14a2352f7b4e01086e0368adf56f37bf0e4b29b31"} Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.810495 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-z5wvn" event={"ID":"5b52afb6-32de-4f14-9663-adeec08b4fad","Type":"ContainerStarted","Data":"0b05369c08b4c72eb185ac082b775e15c7b7da6052ceb4cfe24070e9515aff73"} Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.811976 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"34dfd085-c2bc-4fa4-a950-7df85c48fec0","Type":"ContainerStarted","Data":"ef7806b63293485a6b7348f6d54b702304b3a892535a39e02f2acabce887bfc7"} Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.821476 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"3f8ce486-c345-41aa-b641-b7c4ef27ecfe","Type":"ContainerStarted","Data":"2d43122e89fc2f3b86af7df1e43cb42866a149cfc1bda0377adcec5c856c1fd4"} Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.830198 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f" event={"ID":"76403591-49dd-48f3-976f-34a9b6d7ba8a","Type":"ContainerStarted","Data":"af43cd9a15bdcd0bf0800a6a9adb43727ef4cf877e39df92be7c761c11265621"} Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.831063 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f" Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.833682 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f091de47-47eb-4d84-92b8-c478bc6a7af7","Type":"ContainerStarted","Data":"315ec98084a2af61029598f51483b7de7c636f2d7f53e43acb214e1fbf8573e3"} Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.849278 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f" podStartSLOduration=3.013813676 podStartE2EDuration="17.849263308s" podCreationTimestamp="2026-01-29 06:53:15 +0000 UTC" firstStartedPulling="2026-01-29 06:53:16.562023238 +0000 UTC m=+1088.233517795" lastFinishedPulling="2026-01-29 06:53:31.39747285 +0000 UTC m=+1103.068967427" observedRunningTime="2026-01-29 06:53:32.845600775 +0000 UTC m=+1104.517095332" watchObservedRunningTime="2026-01-29 06:53:32.849263308 +0000 UTC m=+1104.520757865" Jan 29 06:53:32 crc kubenswrapper[4861]: I0129 06:53:32.955696 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-6n7w9"] Jan 29 06:53:33 crc kubenswrapper[4861]: I0129 06:53:33.131359 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0" path="/var/lib/kubelet/pods/330a83ea-3e4a-4ae5-9f14-7b3d0aa3aec0/volumes" Jan 29 06:53:33 crc kubenswrapper[4861]: I0129 06:53:33.131729 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edf10354-bc30-4c70-9104-81c2e081bcee" path="/var/lib/kubelet/pods/edf10354-bc30-4c70-9104-81c2e081bcee/volumes" Jan 29 06:53:33 crc kubenswrapper[4861]: I0129 06:53:33.845574 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5966cedc-8ab5-4390-906b-c5ac39333e09","Type":"ContainerStarted","Data":"093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6"} Jan 29 06:53:33 crc kubenswrapper[4861]: I0129 06:53:33.849007 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6n7w9" event={"ID":"633f63c1-539f-4477-8aae-d6731a514280","Type":"ContainerStarted","Data":"9c31799a9d35eeb6391fb1cb5480751f486272dd441b9fe4dc9735c2dd41dea0"} Jan 29 06:53:36 crc kubenswrapper[4861]: I0129 06:53:36.026260 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f" Jan 29 06:53:36 crc kubenswrapper[4861]: I0129 06:53:36.090530 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c7cbb8f79-7dbcs"] Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.223431 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs" Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.371656 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-config\") pod \"d442990e-f715-44bc-86d7-92259130a2f5\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.371813 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lrg2\" (UniqueName: \"kubernetes.io/projected/d442990e-f715-44bc-86d7-92259130a2f5-kube-api-access-8lrg2\") pod \"d442990e-f715-44bc-86d7-92259130a2f5\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.371832 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-dns-svc\") pod \"d442990e-f715-44bc-86d7-92259130a2f5\" (UID: \"d442990e-f715-44bc-86d7-92259130a2f5\") " Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.372139 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-config" (OuterVolumeSpecName: "config") pod "d442990e-f715-44bc-86d7-92259130a2f5" (UID: "d442990e-f715-44bc-86d7-92259130a2f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.372820 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d442990e-f715-44bc-86d7-92259130a2f5" (UID: "d442990e-f715-44bc-86d7-92259130a2f5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.376619 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d442990e-f715-44bc-86d7-92259130a2f5-kube-api-access-8lrg2" (OuterVolumeSpecName: "kube-api-access-8lrg2") pod "d442990e-f715-44bc-86d7-92259130a2f5" (UID: "d442990e-f715-44bc-86d7-92259130a2f5"). InnerVolumeSpecName "kube-api-access-8lrg2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.475852 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.475886 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lrg2\" (UniqueName: \"kubernetes.io/projected/d442990e-f715-44bc-86d7-92259130a2f5-kube-api-access-8lrg2\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.475900 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d442990e-f715-44bc-86d7-92259130a2f5-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.882851 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs" event={"ID":"d442990e-f715-44bc-86d7-92259130a2f5","Type":"ContainerDied","Data":"f69d3afff9418f85aaf40d794bc306156ba92eb45dd41c13064d1aea40bedc65"} Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.882938 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c7cbb8f79-7dbcs" Jan 29 06:53:38 crc kubenswrapper[4861]: I0129 06:53:38.982310 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c7cbb8f79-7dbcs"] Jan 29 06:53:39 crc kubenswrapper[4861]: I0129 06:53:39.002645 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-c7cbb8f79-7dbcs"] Jan 29 06:53:39 crc kubenswrapper[4861]: I0129 06:53:39.132520 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d442990e-f715-44bc-86d7-92259130a2f5" path="/var/lib/kubelet/pods/d442990e-f715-44bc-86d7-92259130a2f5/volumes" Jan 29 06:53:39 crc kubenswrapper[4861]: I0129 06:53:39.891897 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8c370e6a-40e9-4055-857e-c8357c904c8e","Type":"ContainerStarted","Data":"4c1ce7cacae4207060ac4f5331c4d327e43e77b67b6c58c3045038a6a4ddde7c"} Jan 29 06:53:39 crc kubenswrapper[4861]: I0129 06:53:39.893612 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"34dfd085-c2bc-4fa4-a950-7df85c48fec0","Type":"ContainerStarted","Data":"644d3d4ecb0360b4f4094c99e7ffd6babd80fb95a501022e6a4b47e201e4406b"} Jan 29 06:53:39 crc kubenswrapper[4861]: I0129 06:53:39.894003 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 29 06:53:39 crc kubenswrapper[4861]: I0129 06:53:39.895908 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"3f8ce486-c345-41aa-b641-b7c4ef27ecfe","Type":"ContainerStarted","Data":"99b14fb198d584fcdb36a249a3809f469be4cd3e4644d253366d9f932150b9eb"} Jan 29 06:53:39 crc kubenswrapper[4861]: I0129 06:53:39.902636 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"a1432e17-a4e2-4cde-a3d7-89eddf9973e1","Type":"ContainerStarted","Data":"0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83"} Jan 29 06:53:39 crc kubenswrapper[4861]: I0129 06:53:39.905851 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"a0032cf8-4c45-4a4c-927d-686adba85ab1","Type":"ContainerStarted","Data":"6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96"} Jan 29 06:53:39 crc kubenswrapper[4861]: I0129 06:53:39.975440 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=13.165316937 podStartE2EDuration="19.975419278s" podCreationTimestamp="2026-01-29 06:53:20 +0000 UTC" firstStartedPulling="2026-01-29 06:53:31.953835821 +0000 UTC m=+1103.625330378" lastFinishedPulling="2026-01-29 06:53:38.763938162 +0000 UTC m=+1110.435432719" observedRunningTime="2026-01-29 06:53:39.959356067 +0000 UTC m=+1111.630850644" watchObservedRunningTime="2026-01-29 06:53:39.975419278 +0000 UTC m=+1111.646913825" Jan 29 06:53:40 crc kubenswrapper[4861]: I0129 06:53:40.921968 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f091de47-47eb-4d84-92b8-c478bc6a7af7","Type":"ContainerStarted","Data":"314634ced71a87f16173e1b9c68237474cd93111184df069ec48d9a61d3014b3"} Jan 29 06:53:40 crc kubenswrapper[4861]: I0129 06:53:40.922385 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 29 06:53:40 crc kubenswrapper[4861]: I0129 06:53:40.928293 4861 generic.go:334] "Generic (PLEG): container finished" podID="633f63c1-539f-4477-8aae-d6731a514280" containerID="7b3d7a1bfbd9f7051aecfdfc98f93e13778cf0788546fce2d6915d3bca1db38f" exitCode=0 Jan 29 06:53:40 crc kubenswrapper[4861]: I0129 06:53:40.926002 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-z5wvn" event={"ID":"5b52afb6-32de-4f14-9663-adeec08b4fad","Type":"ContainerStarted","Data":"43ed60341599191e6dc9eb1bb3d03dd08460301dbced2c46cda3df1d5546241f"} Jan 29 06:53:40 crc kubenswrapper[4861]: I0129 06:53:40.930758 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6n7w9" event={"ID":"633f63c1-539f-4477-8aae-d6731a514280","Type":"ContainerDied","Data":"7b3d7a1bfbd9f7051aecfdfc98f93e13778cf0788546fce2d6915d3bca1db38f"} Jan 29 06:53:40 crc kubenswrapper[4861]: I0129 06:53:40.938720 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=11.206715818 podStartE2EDuration="18.938693724s" podCreationTimestamp="2026-01-29 06:53:22 +0000 UTC" firstStartedPulling="2026-01-29 06:53:31.916574052 +0000 UTC m=+1103.588068609" lastFinishedPulling="2026-01-29 06:53:39.648551928 +0000 UTC m=+1111.320046515" observedRunningTime="2026-01-29 06:53:40.937241417 +0000 UTC m=+1112.608736034" watchObservedRunningTime="2026-01-29 06:53:40.938693724 +0000 UTC m=+1112.610188321" Jan 29 06:53:40 crc kubenswrapper[4861]: I0129 06:53:40.969048 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-z5wvn" podStartSLOduration=8.520953164 podStartE2EDuration="15.969023029s" podCreationTimestamp="2026-01-29 06:53:25 +0000 UTC" firstStartedPulling="2026-01-29 06:53:32.089865809 +0000 UTC m=+1103.761360366" lastFinishedPulling="2026-01-29 06:53:39.537935674 +0000 UTC m=+1111.209430231" observedRunningTime="2026-01-29 06:53:40.963367304 +0000 UTC m=+1112.634861901" watchObservedRunningTime="2026-01-29 06:53:40.969023029 +0000 UTC m=+1112.640517626" Jan 29 06:53:41 crc kubenswrapper[4861]: I0129 06:53:41.944144 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6n7w9" 
event={"ID":"633f63c1-539f-4477-8aae-d6731a514280","Type":"ContainerStarted","Data":"f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f"} Jan 29 06:53:41 crc kubenswrapper[4861]: I0129 06:53:41.944852 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-z5wvn" Jan 29 06:53:41 crc kubenswrapper[4861]: I0129 06:53:41.944883 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6n7w9" event={"ID":"633f63c1-539f-4477-8aae-d6731a514280","Type":"ContainerStarted","Data":"df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52"} Jan 29 06:53:41 crc kubenswrapper[4861]: I0129 06:53:41.972116 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-6n7w9" podStartSLOduration=10.802693321 podStartE2EDuration="16.972054411s" podCreationTimestamp="2026-01-29 06:53:25 +0000 UTC" firstStartedPulling="2026-01-29 06:53:32.984345439 +0000 UTC m=+1104.655839996" lastFinishedPulling="2026-01-29 06:53:39.153706489 +0000 UTC m=+1110.825201086" observedRunningTime="2026-01-29 06:53:41.964108918 +0000 UTC m=+1113.635603535" watchObservedRunningTime="2026-01-29 06:53:41.972054411 +0000 UTC m=+1113.643548998" Jan 29 06:53:42 crc kubenswrapper[4861]: I0129 06:53:42.963970 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:42 crc kubenswrapper[4861]: I0129 06:53:42.964060 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:53:45 crc kubenswrapper[4861]: I0129 06:53:45.501801 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 29 06:53:48 crc kubenswrapper[4861]: I0129 06:53:48.015041 4861 generic.go:334] "Generic (PLEG): container finished" podID="8c370e6a-40e9-4055-857e-c8357c904c8e" containerID="4c1ce7cacae4207060ac4f5331c4d327e43e77b67b6c58c3045038a6a4ddde7c" exitCode=0 Jan 29 06:53:48 crc kubenswrapper[4861]: I0129 06:53:48.015177 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8c370e6a-40e9-4055-857e-c8357c904c8e","Type":"ContainerDied","Data":"4c1ce7cacae4207060ac4f5331c4d327e43e77b67b6c58c3045038a6a4ddde7c"} Jan 29 06:53:48 crc kubenswrapper[4861]: I0129 06:53:48.020856 4861 generic.go:334] "Generic (PLEG): container finished" podID="a0032cf8-4c45-4a4c-927d-686adba85ab1" containerID="6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96" exitCode=0 Jan 29 06:53:48 crc kubenswrapper[4861]: I0129 06:53:48.020910 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a0032cf8-4c45-4a4c-927d-686adba85ab1","Type":"ContainerDied","Data":"6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96"} Jan 29 06:53:49 crc kubenswrapper[4861]: I0129 06:53:49.030874 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"a1432e17-a4e2-4cde-a3d7-89eddf9973e1","Type":"ContainerStarted","Data":"bdb5e7afda9ae61df28a1d053e02b61d09e0d2ad8787a4805fb51623eb383c1d"} Jan 29 06:53:49 crc kubenswrapper[4861]: I0129 06:53:49.035030 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a0032cf8-4c45-4a4c-927d-686adba85ab1","Type":"ContainerStarted","Data":"9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3"} Jan 29 06:53:49 crc kubenswrapper[4861]: I0129 
06:53:49.040437 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8c370e6a-40e9-4055-857e-c8357c904c8e","Type":"ContainerStarted","Data":"9c170b6b24190e8407f34d3e2aa2e80d167bb93c61c16add369b12444c54d78a"} Jan 29 06:53:49 crc kubenswrapper[4861]: I0129 06:53:49.043580 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"3f8ce486-c345-41aa-b641-b7c4ef27ecfe","Type":"ContainerStarted","Data":"162f0b5fe401e18a951e9523bbee70e5964f24419ec2081475b2ab90051cf4b6"} Jan 29 06:53:49 crc kubenswrapper[4861]: I0129 06:53:49.060276 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=4.7321510490000005 podStartE2EDuration="21.060193142s" podCreationTimestamp="2026-01-29 06:53:28 +0000 UTC" firstStartedPulling="2026-01-29 06:53:32.11719242 +0000 UTC m=+1103.788686977" lastFinishedPulling="2026-01-29 06:53:48.445234513 +0000 UTC m=+1120.116729070" observedRunningTime="2026-01-29 06:53:49.054557108 +0000 UTC m=+1120.726051765" watchObservedRunningTime="2026-01-29 06:53:49.060193142 +0000 UTC m=+1120.731687709" Jan 29 06:53:49 crc kubenswrapper[4861]: I0129 06:53:49.094842 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=24.164041261 podStartE2EDuration="31.094825706s" podCreationTimestamp="2026-01-29 06:53:18 +0000 UTC" firstStartedPulling="2026-01-29 06:53:31.953504332 +0000 UTC m=+1103.624998889" lastFinishedPulling="2026-01-29 06:53:38.884288767 +0000 UTC m=+1110.555783334" observedRunningTime="2026-01-29 06:53:49.089512421 +0000 UTC m=+1120.761007018" watchObservedRunningTime="2026-01-29 06:53:49.094825706 +0000 UTC m=+1120.766320273" Jan 29 06:53:49 crc kubenswrapper[4861]: I0129 06:53:49.121197 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=6.733999422 podStartE2EDuration="23.1211816s" podCreationTimestamp="2026-01-29 06:53:26 +0000 UTC" firstStartedPulling="2026-01-29 06:53:32.088561245 +0000 UTC m=+1103.760055812" lastFinishedPulling="2026-01-29 06:53:48.475743393 +0000 UTC m=+1120.147237990" observedRunningTime="2026-01-29 06:53:49.115915445 +0000 UTC m=+1120.787410042" watchObservedRunningTime="2026-01-29 06:53:49.1211816 +0000 UTC m=+1120.792676157" Jan 29 06:53:49 crc kubenswrapper[4861]: I0129 06:53:49.143697 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=24.303728478 podStartE2EDuration="32.143673474s" podCreationTimestamp="2026-01-29 06:53:17 +0000 UTC" firstStartedPulling="2026-01-29 06:53:31.315177279 +0000 UTC m=+1102.986671836" lastFinishedPulling="2026-01-29 06:53:39.155122275 +0000 UTC m=+1110.826616832" observedRunningTime="2026-01-29 06:53:49.13880988 +0000 UTC m=+1120.810304477" watchObservedRunningTime="2026-01-29 06:53:49.143673474 +0000 UTC m=+1120.815168071" Jan 29 06:53:49 crc kubenswrapper[4861]: I0129 06:53:49.896175 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:50 crc kubenswrapper[4861]: I0129 06:53:50.015522 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:50 crc kubenswrapper[4861]: I0129 06:53:50.015605 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" 
Jan 29 06:53:50 crc kubenswrapper[4861]: E0129 06:53:50.722120 4861 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.80:34614->38.102.83.80:46667: write tcp 38.102.83.80:34614->38.102.83.80:46667: write: broken pipe Jan 29 06:53:50 crc kubenswrapper[4861]: I0129 06:53:50.896705 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:50 crc kubenswrapper[4861]: I0129 06:53:50.962268 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.130357 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.396133 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b79764b65-6ktkf"] Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.402792 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.406745 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b79764b65-6ktkf"] Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.407451 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.482861 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-h8qb2"] Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.483709 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.485510 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.498539 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-h8qb2"] Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.519178 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-dns-svc\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.519249 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-config\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.519360 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jgvw\" (UniqueName: \"kubernetes.io/projected/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-kube-api-access-7jgvw\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.519405 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-ovsdbserver-sb\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.621298 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovn-rundir\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.621361 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-config\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.621411 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.621469 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bq2td\" (UniqueName: \"kubernetes.io/projected/a871f110-29fe-4e80-b339-5209aebc0652-kube-api-access-bq2td\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.621499 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a871f110-29fe-4e80-b339-5209aebc0652-config\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.621525 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jgvw\" (UniqueName: \"kubernetes.io/projected/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-kube-api-access-7jgvw\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.621553 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-combined-ca-bundle\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.621785 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-ovsdbserver-sb\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.621845 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovs-rundir\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.621900 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-dns-svc\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.622276 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-config\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.622635 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-ovsdbserver-sb\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.622794 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-dns-svc\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.642180 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jgvw\" (UniqueName: \"kubernetes.io/projected/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-kube-api-access-7jgvw\") pod \"dnsmasq-dns-5b79764b65-6ktkf\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.654958 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.707221 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.714192 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b79764b65-6ktkf"] Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.714837 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.723210 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.723288 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bq2td\" (UniqueName: \"kubernetes.io/projected/a871f110-29fe-4e80-b339-5209aebc0652-kube-api-access-bq2td\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.723333 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a871f110-29fe-4e80-b339-5209aebc0652-config\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.723371 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-combined-ca-bundle\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.723401 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovs-rundir\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.723451 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovn-rundir\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.724940 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a871f110-29fe-4e80-b339-5209aebc0652-config\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.731784 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovs-rundir\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.731784 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovn-rundir\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc 
kubenswrapper[4861]: I0129 06:53:51.733436 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.739581 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-combined-ca-bundle\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.749341 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bq2td\" (UniqueName: \"kubernetes.io/projected/a871f110-29fe-4e80-b339-5209aebc0652-kube-api-access-bq2td\") pod \"ovn-controller-metrics-h8qb2\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.753462 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-pm6pd"] Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.755448 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.760386 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.778129 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-pm6pd"] Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.804374 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.824250 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.824380 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-config\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.824456 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxhd9\" (UniqueName: \"kubernetes.io/projected/453d4ef4-21ed-4ba5-8a54-7d16e7555423-kube-api-access-gxhd9\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.824541 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.824653 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-dns-svc\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.926652 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.927047 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-dns-svc\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.927157 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.927193 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-config\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: 
\"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.927223 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxhd9\" (UniqueName: \"kubernetes.io/projected/453d4ef4-21ed-4ba5-8a54-7d16e7555423-kube-api-access-gxhd9\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.928762 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.929406 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-dns-svc\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.930016 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.930656 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-config\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:51 crc kubenswrapper[4861]: I0129 06:53:51.945242 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxhd9\" (UniqueName: \"kubernetes.io/projected/453d4ef4-21ed-4ba5-8a54-7d16e7555423-kube-api-access-gxhd9\") pod \"dnsmasq-dns-586b989cdc-pm6pd\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.068877 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.113496 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.190776 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.201124 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b79764b65-6ktkf"] Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.277991 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.279256 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.293947 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-h8qb2"] Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.317657 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.317832 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.318690 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-xqbxq" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.319430 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.324920 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 06:53:52 crc kubenswrapper[4861]: W0129 06:53:52.334251 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda871f110_29fe_4e80_b339_5209aebc0652.slice/crio-5ad21a9e6631c281c20796d9b9df214a5eaa20b832d5832f99725eaa33aff7c0 WatchSource:0}: Error finding container 5ad21a9e6631c281c20796d9b9df214a5eaa20b832d5832f99725eaa33aff7c0: Status 404 returned error can't find the container with id 5ad21a9e6631c281c20796d9b9df214a5eaa20b832d5832f99725eaa33aff7c0 Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.335609 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.335784 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-config\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.335823 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.344391 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.345295 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64dvp\" (UniqueName: \"kubernetes.io/projected/93a9df75-0ea9-457b-84f0-17b95d5dcced-kube-api-access-64dvp\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.345369 4861 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.345400 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-scripts\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.448368 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-scripts\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.448432 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.448495 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-config\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.448516 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.448594 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.448650 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64dvp\" (UniqueName: \"kubernetes.io/projected/93a9df75-0ea9-457b-84f0-17b95d5dcced-kube-api-access-64dvp\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.448769 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.449270 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-scripts\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: 
I0129 06:53:52.449429 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-config\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.451459 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.457235 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.458189 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.461314 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.482615 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64dvp\" (UniqueName: \"kubernetes.io/projected/93a9df75-0ea9-457b-84f0-17b95d5dcced-kube-api-access-64dvp\") pod \"ovn-northd-0\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") " pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.485179 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-pm6pd"] Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.511942 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-l64rb"] Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.517834 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.525911 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.529207 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-l64rb"] Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.649118 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.653686 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.653833 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.653908 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-config\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.653933 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cf2ln\" (UniqueName: \"kubernetes.io/projected/c19001ec-52ca-4c24-8762-01590c5a843f-kube-api-access-cf2ln\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.653956 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.755837 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.755909 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.755975 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-config\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.756003 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cf2ln\" (UniqueName: \"kubernetes.io/projected/c19001ec-52ca-4c24-8762-01590c5a843f-kube-api-access-cf2ln\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: 
\"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.756696 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.757211 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-config\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.757929 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.757974 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.758954 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.786559 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cf2ln\" (UniqueName: \"kubernetes.io/projected/c19001ec-52ca-4c24-8762-01590c5a843f-kube-api-access-cf2ln\") pod \"dnsmasq-dns-67fdf7998c-l64rb\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.787017 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-pm6pd"] Jan 29 06:53:52 crc kubenswrapper[4861]: W0129 06:53:52.795261 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod453d4ef4_21ed_4ba5_8a54_7d16e7555423.slice/crio-b9c2327cf97979e529e4da07c20d011bbae3586d021341afe50af6347036eea8 WatchSource:0}: Error finding container b9c2327cf97979e529e4da07c20d011bbae3586d021341afe50af6347036eea8: Status 404 returned error can't find the container with id b9c2327cf97979e529e4da07c20d011bbae3586d021341afe50af6347036eea8 Jan 29 06:53:52 crc kubenswrapper[4861]: I0129 06:53:52.888169 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.076264 4861 generic.go:334] "Generic (PLEG): container finished" podID="453d4ef4-21ed-4ba5-8a54-7d16e7555423" containerID="2822dc1761cfab96724d95895e67eb4cc9a57c1d33adf1349fcb61cfb8c7798c" exitCode=0 Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.076321 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" event={"ID":"453d4ef4-21ed-4ba5-8a54-7d16e7555423","Type":"ContainerDied","Data":"2822dc1761cfab96724d95895e67eb4cc9a57c1d33adf1349fcb61cfb8c7798c"} Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.076343 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" event={"ID":"453d4ef4-21ed-4ba5-8a54-7d16e7555423","Type":"ContainerStarted","Data":"b9c2327cf97979e529e4da07c20d011bbae3586d021341afe50af6347036eea8"} Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.079725 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-h8qb2" event={"ID":"a871f110-29fe-4e80-b339-5209aebc0652","Type":"ContainerStarted","Data":"96077c6516cf385f97eadb5e32fbb191b2f9052a56daddb824c1bc8fcead61a3"} Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.079752 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-h8qb2" event={"ID":"a871f110-29fe-4e80-b339-5209aebc0652","Type":"ContainerStarted","Data":"5ad21a9e6631c281c20796d9b9df214a5eaa20b832d5832f99725eaa33aff7c0"} Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.091969 4861 generic.go:334] "Generic (PLEG): container finished" podID="a9b29f52-f841-4db9-8ae1-a3e31674e7c6" containerID="8ca7c944e9eefc3ee5921b27f8a9ffa6222fc99a832bc1a1b59410ad7811b887" exitCode=0 Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.092765 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" event={"ID":"a9b29f52-f841-4db9-8ae1-a3e31674e7c6","Type":"ContainerDied","Data":"8ca7c944e9eefc3ee5921b27f8a9ffa6222fc99a832bc1a1b59410ad7811b887"} Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.092836 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" event={"ID":"a9b29f52-f841-4db9-8ae1-a3e31674e7c6","Type":"ContainerStarted","Data":"3d2835907dd654385863679e2f1d83607b8e05b0d9d4be93a069028cf2b5e64d"} Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.140121 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-h8qb2" podStartSLOduration=2.140096199 podStartE2EDuration="2.140096199s" podCreationTimestamp="2026-01-29 06:53:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:53:53.114563467 +0000 UTC m=+1124.786058034" watchObservedRunningTime="2026-01-29 06:53:53.140096199 +0000 UTC m=+1124.811590756" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.169367 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.326623 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-l64rb"] Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.450168 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.503401 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.577380 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-ovsdbserver-sb\") pod \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.577489 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-dns-svc\") pod \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.577553 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-config\") pod \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.577613 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-config\") pod \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.577645 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxhd9\" (UniqueName: \"kubernetes.io/projected/453d4ef4-21ed-4ba5-8a54-7d16e7555423-kube-api-access-gxhd9\") pod \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.577667 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jgvw\" (UniqueName: \"kubernetes.io/projected/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-kube-api-access-7jgvw\") pod \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\" (UID: \"a9b29f52-f841-4db9-8ae1-a3e31674e7c6\") " Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.577731 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-dns-svc\") pod \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.577765 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-sb\") pod \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.577787 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-nb\") pod \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\" (UID: \"453d4ef4-21ed-4ba5-8a54-7d16e7555423\") " Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.583909 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-kube-api-access-7jgvw" (OuterVolumeSpecName: "kube-api-access-7jgvw") pod "a9b29f52-f841-4db9-8ae1-a3e31674e7c6" (UID: "a9b29f52-f841-4db9-8ae1-a3e31674e7c6"). InnerVolumeSpecName "kube-api-access-7jgvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.591951 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/453d4ef4-21ed-4ba5-8a54-7d16e7555423-kube-api-access-gxhd9" (OuterVolumeSpecName: "kube-api-access-gxhd9") pod "453d4ef4-21ed-4ba5-8a54-7d16e7555423" (UID: "453d4ef4-21ed-4ba5-8a54-7d16e7555423"). InnerVolumeSpecName "kube-api-access-gxhd9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.600549 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 29 06:53:53 crc kubenswrapper[4861]: E0129 06:53:53.601048 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9b29f52-f841-4db9-8ae1-a3e31674e7c6" containerName="init" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.601088 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9b29f52-f841-4db9-8ae1-a3e31674e7c6" containerName="init" Jan 29 06:53:53 crc kubenswrapper[4861]: E0129 06:53:53.601145 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="453d4ef4-21ed-4ba5-8a54-7d16e7555423" containerName="init" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.601153 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="453d4ef4-21ed-4ba5-8a54-7d16e7555423" containerName="init" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.601371 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9b29f52-f841-4db9-8ae1-a3e31674e7c6" containerName="init" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.601391 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="453d4ef4-21ed-4ba5-8a54-7d16e7555423" containerName="init" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.601866 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-config" (OuterVolumeSpecName: "config") pod "453d4ef4-21ed-4ba5-8a54-7d16e7555423" (UID: "453d4ef4-21ed-4ba5-8a54-7d16e7555423"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.606663 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-config" (OuterVolumeSpecName: "config") pod "a9b29f52-f841-4db9-8ae1-a3e31674e7c6" (UID: "a9b29f52-f841-4db9-8ae1-a3e31674e7c6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.607295 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "453d4ef4-21ed-4ba5-8a54-7d16e7555423" (UID: "453d4ef4-21ed-4ba5-8a54-7d16e7555423"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.607754 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.614971 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.615218 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-xx4vk" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.615308 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.615426 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.620351 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.623549 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a9b29f52-f841-4db9-8ae1-a3e31674e7c6" (UID: "a9b29f52-f841-4db9-8ae1-a3e31674e7c6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.631255 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a9b29f52-f841-4db9-8ae1-a3e31674e7c6" (UID: "a9b29f52-f841-4db9-8ae1-a3e31674e7c6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.636527 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "453d4ef4-21ed-4ba5-8a54-7d16e7555423" (UID: "453d4ef4-21ed-4ba5-8a54-7d16e7555423"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.636870 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "453d4ef4-21ed-4ba5-8a54-7d16e7555423" (UID: "453d4ef4-21ed-4ba5-8a54-7d16e7555423"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680083 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-lock\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680481 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfwnr\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-kube-api-access-cfwnr\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680560 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680591 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d54030c-d725-4a6c-ad29-d84482378f20-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680660 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-cache\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680701 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680768 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680805 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680824 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxhd9\" (UniqueName: \"kubernetes.io/projected/453d4ef4-21ed-4ba5-8a54-7d16e7555423-kube-api-access-gxhd9\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680836 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jgvw\" (UniqueName: \"kubernetes.io/projected/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-kube-api-access-7jgvw\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680844 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680853 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680860 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/453d4ef4-21ed-4ba5-8a54-7d16e7555423-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680868 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.680876 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9b29f52-f841-4db9-8ae1-a3e31674e7c6-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.782785 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d54030c-d725-4a6c-ad29-d84482378f20-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.782884 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-cache\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.782943 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.783000 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-lock\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.783032 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfwnr\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-kube-api-access-cfwnr\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: E0129 06:53:53.783334 4861 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 06:53:53 crc kubenswrapper[4861]: E0129 06:53:53.783352 4861 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 06:53:53 crc kubenswrapper[4861]: E0129 06:53:53.783407 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift 
podName:7d54030c-d725-4a6c-ad29-d84482378f20 nodeName:}" failed. No retries permitted until 2026-01-29 06:53:54.283385481 +0000 UTC m=+1125.954880038 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift") pod "swift-storage-0" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20") : configmap "swift-ring-files" not found Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.783478 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-cache\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.783679 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.783931 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-lock\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.784017 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.789575 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d54030c-d725-4a6c-ad29-d84482378f20-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.808257 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfwnr\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-kube-api-access-cfwnr\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:53 crc kubenswrapper[4861]: I0129 06:53:53.814265 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.102128 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" event={"ID":"a9b29f52-f841-4db9-8ae1-a3e31674e7c6","Type":"ContainerDied","Data":"3d2835907dd654385863679e2f1d83607b8e05b0d9d4be93a069028cf2b5e64d"} Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.102161 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b79764b65-6ktkf" Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.102194 4861 scope.go:117] "RemoveContainer" containerID="8ca7c944e9eefc3ee5921b27f8a9ffa6222fc99a832bc1a1b59410ad7811b887" Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.103704 4861 generic.go:334] "Generic (PLEG): container finished" podID="c19001ec-52ca-4c24-8762-01590c5a843f" containerID="cb5a44707b0824360acb1f4a13b895be9162af2cd1d5933432c10c775e0ebeda" exitCode=0 Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.103759 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" event={"ID":"c19001ec-52ca-4c24-8762-01590c5a843f","Type":"ContainerDied","Data":"cb5a44707b0824360acb1f4a13b895be9162af2cd1d5933432c10c775e0ebeda"} Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.103784 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" event={"ID":"c19001ec-52ca-4c24-8762-01590c5a843f","Type":"ContainerStarted","Data":"9cee34911c42ac5398bfcea688c2bda63d7d03acf32a6373303aa0cd7f8c6c23"} Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.107885 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" event={"ID":"453d4ef4-21ed-4ba5-8a54-7d16e7555423","Type":"ContainerDied","Data":"b9c2327cf97979e529e4da07c20d011bbae3586d021341afe50af6347036eea8"} Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.107964 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-pm6pd" Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.110749 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"93a9df75-0ea9-457b-84f0-17b95d5dcced","Type":"ContainerStarted","Data":"162f15ef0121a63ab7849141694da0a5105505ef7dd8d76872e40ac19151c918"} Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.226825 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b79764b65-6ktkf"] Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.235997 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b79764b65-6ktkf"] Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.248453 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-pm6pd"] Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.257459 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-pm6pd"] Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.293370 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:54 crc kubenswrapper[4861]: E0129 06:53:54.293562 4861 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 06:53:54 crc kubenswrapper[4861]: E0129 06:53:54.293583 4861 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 06:53:54 crc kubenswrapper[4861]: E0129 06:53:54.293631 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift 
podName:7d54030c-d725-4a6c-ad29-d84482378f20 nodeName:}" failed. No retries permitted until 2026-01-29 06:53:55.293614164 +0000 UTC m=+1126.965108721 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift") pod "swift-storage-0" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20") : configmap "swift-ring-files" not found Jan 29 06:53:54 crc kubenswrapper[4861]: I0129 06:53:54.401056 4861 scope.go:117] "RemoveContainer" containerID="2822dc1761cfab96724d95895e67eb4cc9a57c1d33adf1349fcb61cfb8c7798c" Jan 29 06:53:55 crc kubenswrapper[4861]: I0129 06:53:55.125908 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="453d4ef4-21ed-4ba5-8a54-7d16e7555423" path="/var/lib/kubelet/pods/453d4ef4-21ed-4ba5-8a54-7d16e7555423/volumes" Jan 29 06:53:55 crc kubenswrapper[4861]: I0129 06:53:55.126737 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9b29f52-f841-4db9-8ae1-a3e31674e7c6" path="/var/lib/kubelet/pods/a9b29f52-f841-4db9-8ae1-a3e31674e7c6/volumes" Jan 29 06:53:55 crc kubenswrapper[4861]: I0129 06:53:55.127269 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 29 06:53:55 crc kubenswrapper[4861]: I0129 06:53:55.127299 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"93a9df75-0ea9-457b-84f0-17b95d5dcced","Type":"ContainerStarted","Data":"015f72c114bfd8ca01ff83fcfd7253c5311da2c4dbfdaa591c7feb5e53a0693d"} Jan 29 06:53:55 crc kubenswrapper[4861]: I0129 06:53:55.127312 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"93a9df75-0ea9-457b-84f0-17b95d5dcced","Type":"ContainerStarted","Data":"82d4301c7f8e1b6d25f3d60567395a1ff2635c17934b75e63917065ada770d83"} Jan 29 06:53:55 crc kubenswrapper[4861]: I0129 06:53:55.129981 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" event={"ID":"c19001ec-52ca-4c24-8762-01590c5a843f","Type":"ContainerStarted","Data":"0cdb16ce2c168c436b331e4bbdf40084165870485570a89136f5fcf62a1ecb90"} Jan 29 06:53:55 crc kubenswrapper[4861]: I0129 06:53:55.130387 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:53:55 crc kubenswrapper[4861]: I0129 06:53:55.156606 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=1.90627983 podStartE2EDuration="3.156590178s" podCreationTimestamp="2026-01-29 06:53:52 +0000 UTC" firstStartedPulling="2026-01-29 06:53:53.18439023 +0000 UTC m=+1124.855884787" lastFinishedPulling="2026-01-29 06:53:54.434700568 +0000 UTC m=+1126.106195135" observedRunningTime="2026-01-29 06:53:55.155065088 +0000 UTC m=+1126.826559655" watchObservedRunningTime="2026-01-29 06:53:55.156590178 +0000 UTC m=+1126.828084735" Jan 29 06:53:55 crc kubenswrapper[4861]: I0129 06:53:55.173692 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" podStartSLOduration=3.173674254 podStartE2EDuration="3.173674254s" podCreationTimestamp="2026-01-29 06:53:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:53:55.172719869 +0000 UTC m=+1126.844214436" watchObservedRunningTime="2026-01-29 06:53:55.173674254 +0000 UTC m=+1126.845168811" Jan 29 06:53:55 crc 
kubenswrapper[4861]: I0129 06:53:55.314912 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:55 crc kubenswrapper[4861]: E0129 06:53:55.315202 4861 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 06:53:55 crc kubenswrapper[4861]: E0129 06:53:55.315219 4861 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 06:53:55 crc kubenswrapper[4861]: E0129 06:53:55.315268 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift podName:7d54030c-d725-4a6c-ad29-d84482378f20 nodeName:}" failed. No retries permitted until 2026-01-29 06:53:57.31525119 +0000 UTC m=+1128.986745747 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift") pod "swift-storage-0" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20") : configmap "swift-ring-files" not found Jan 29 06:53:56 crc kubenswrapper[4861]: I0129 06:53:56.149498 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:56 crc kubenswrapper[4861]: I0129 06:53:56.253952 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.363268 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:53:57 crc kubenswrapper[4861]: E0129 06:53:57.364242 4861 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 06:53:57 crc kubenswrapper[4861]: E0129 06:53:57.364276 4861 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 06:53:57 crc kubenswrapper[4861]: E0129 06:53:57.364351 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift podName:7d54030c-d725-4a6c-ad29-d84482378f20 nodeName:}" failed. No retries permitted until 2026-01-29 06:54:01.364326242 +0000 UTC m=+1133.035820799 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift") pod "swift-storage-0" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20") : configmap "swift-ring-files" not found Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.624266 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-jx6pp"] Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.625545 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.628499 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.632948 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.634899 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.643250 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-jx6pp"] Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.672552 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-ring-data-devices\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.672608 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-scripts\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.672632 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-swiftconf\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.672694 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-dispersionconf\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.672718 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2h84\" (UniqueName: \"kubernetes.io/projected/0705d705-4857-4f55-a51a-0b9fce72c693-kube-api-access-c2h84\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.672737 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-combined-ca-bundle\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.672768 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0705d705-4857-4f55-a51a-0b9fce72c693-etc-swift\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 
06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.681301 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-jx6pp"] Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.688753 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-5j2wv"] Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.689920 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: E0129 06:53:57.691684 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-c2h84 ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/swift-ring-rebalance-jx6pp" podUID="0705d705-4857-4f55-a51a-0b9fce72c693" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.710401 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-5j2wv"] Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774196 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-scripts\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774243 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-ring-data-devices\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774265 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-swiftconf\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774294 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pn2t7\" (UniqueName: \"kubernetes.io/projected/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-kube-api-access-pn2t7\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774328 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-combined-ca-bundle\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774355 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-etc-swift\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774374 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-ring-data-devices\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774395 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-dispersionconf\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774425 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-dispersionconf\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774446 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2h84\" (UniqueName: \"kubernetes.io/projected/0705d705-4857-4f55-a51a-0b9fce72c693-kube-api-access-c2h84\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774469 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-combined-ca-bundle\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774492 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-scripts\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774523 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0705d705-4857-4f55-a51a-0b9fce72c693-etc-swift\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774543 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-swiftconf\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774958 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-scripts\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.774999 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-ring-data-devices\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.775306 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0705d705-4857-4f55-a51a-0b9fce72c693-etc-swift\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.785674 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-combined-ca-bundle\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.785815 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-dispersionconf\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.786298 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-swiftconf\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.795553 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2h84\" (UniqueName: \"kubernetes.io/projected/0705d705-4857-4f55-a51a-0b9fce72c693-kube-api-access-c2h84\") pod \"swift-ring-rebalance-jx6pp\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.875971 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-combined-ca-bundle\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.876038 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-etc-swift\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.876056 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-ring-data-devices\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.876168 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-dispersionconf\") pod 
\"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.876207 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-scripts\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.876241 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-swiftconf\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.876317 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pn2t7\" (UniqueName: \"kubernetes.io/projected/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-kube-api-access-pn2t7\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.876492 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-etc-swift\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.877001 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-ring-data-devices\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.877031 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-scripts\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.880719 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-dispersionconf\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.881512 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-combined-ca-bundle\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 06:53:57.881640 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-swiftconf\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:57 crc kubenswrapper[4861]: I0129 
06:53:57.902821 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pn2t7\" (UniqueName: \"kubernetes.io/projected/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-kube-api-access-pn2t7\") pod \"swift-ring-rebalance-5j2wv\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") " pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.017853 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-5j2wv" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.163756 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.178556 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.283335 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-ring-data-devices\") pod \"0705d705-4857-4f55-a51a-0b9fce72c693\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.283581 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-swiftconf\") pod \"0705d705-4857-4f55-a51a-0b9fce72c693\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.283756 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-scripts\") pod \"0705d705-4857-4f55-a51a-0b9fce72c693\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.283830 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0705d705-4857-4f55-a51a-0b9fce72c693-etc-swift\") pod \"0705d705-4857-4f55-a51a-0b9fce72c693\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.284006 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2h84\" (UniqueName: \"kubernetes.io/projected/0705d705-4857-4f55-a51a-0b9fce72c693-kube-api-access-c2h84\") pod \"0705d705-4857-4f55-a51a-0b9fce72c693\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.284059 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-combined-ca-bundle\") pod \"0705d705-4857-4f55-a51a-0b9fce72c693\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.283998 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "0705d705-4857-4f55-a51a-0b9fce72c693" (UID: "0705d705-4857-4f55-a51a-0b9fce72c693"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.284127 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-dispersionconf\") pod \"0705d705-4857-4f55-a51a-0b9fce72c693\" (UID: \"0705d705-4857-4f55-a51a-0b9fce72c693\") " Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.284391 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0705d705-4857-4f55-a51a-0b9fce72c693-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "0705d705-4857-4f55-a51a-0b9fce72c693" (UID: "0705d705-4857-4f55-a51a-0b9fce72c693"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.284681 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-scripts" (OuterVolumeSpecName: "scripts") pod "0705d705-4857-4f55-a51a-0b9fce72c693" (UID: "0705d705-4857-4f55-a51a-0b9fce72c693"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.286626 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.286662 4861 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0705d705-4857-4f55-a51a-0b9fce72c693-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.286680 4861 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0705d705-4857-4f55-a51a-0b9fce72c693-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.289571 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "0705d705-4857-4f55-a51a-0b9fce72c693" (UID: "0705d705-4857-4f55-a51a-0b9fce72c693"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.290505 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "0705d705-4857-4f55-a51a-0b9fce72c693" (UID: "0705d705-4857-4f55-a51a-0b9fce72c693"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.292296 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0705d705-4857-4f55-a51a-0b9fce72c693-kube-api-access-c2h84" (OuterVolumeSpecName: "kube-api-access-c2h84") pod "0705d705-4857-4f55-a51a-0b9fce72c693" (UID: "0705d705-4857-4f55-a51a-0b9fce72c693"). InnerVolumeSpecName "kube-api-access-c2h84". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.293666 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0705d705-4857-4f55-a51a-0b9fce72c693" (UID: "0705d705-4857-4f55-a51a-0b9fce72c693"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.389618 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2h84\" (UniqueName: \"kubernetes.io/projected/0705d705-4857-4f55-a51a-0b9fce72c693-kube-api-access-c2h84\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.389693 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.389717 4861 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.389739 4861 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0705d705-4857-4f55-a51a-0b9fce72c693-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.516682 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-5j2wv"] Jan 29 06:53:58 crc kubenswrapper[4861]: W0129 06:53:58.521649 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf0bd3eaf_c6bd_4fa3_8a68_08334f7777b9.slice/crio-39678c0b309befc6eaca3a6e21adacef97433087953fc2955cd4481cf1a9ac40 WatchSource:0}: Error finding container 39678c0b309befc6eaca3a6e21adacef97433087953fc2955cd4481cf1a9ac40: Status 404 returned error can't find the container with id 39678c0b309befc6eaca3a6e21adacef97433087953fc2955cd4481cf1a9ac40 Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.654051 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.654249 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.746125 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-t55m7"] Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.747289 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-t55m7" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.751015 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.768999 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-t55m7"] Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.799369 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pth9h\" (UniqueName: \"kubernetes.io/projected/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-kube-api-access-pth9h\") pod \"root-account-create-update-t55m7\" (UID: \"9d3b2bce-2a49-4b85-8aa2-a048f5f54044\") " pod="openstack/root-account-create-update-t55m7" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.799496 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-operator-scripts\") pod \"root-account-create-update-t55m7\" (UID: \"9d3b2bce-2a49-4b85-8aa2-a048f5f54044\") " pod="openstack/root-account-create-update-t55m7" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.874036 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.900487 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pth9h\" (UniqueName: \"kubernetes.io/projected/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-kube-api-access-pth9h\") pod \"root-account-create-update-t55m7\" (UID: \"9d3b2bce-2a49-4b85-8aa2-a048f5f54044\") " pod="openstack/root-account-create-update-t55m7" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.900575 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-operator-scripts\") pod \"root-account-create-update-t55m7\" (UID: \"9d3b2bce-2a49-4b85-8aa2-a048f5f54044\") " pod="openstack/root-account-create-update-t55m7" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.902417 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-operator-scripts\") pod \"root-account-create-update-t55m7\" (UID: \"9d3b2bce-2a49-4b85-8aa2-a048f5f54044\") " pod="openstack/root-account-create-update-t55m7" Jan 29 06:53:58 crc kubenswrapper[4861]: I0129 06:53:58.921556 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pth9h\" (UniqueName: \"kubernetes.io/projected/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-kube-api-access-pth9h\") pod \"root-account-create-update-t55m7\" (UID: \"9d3b2bce-2a49-4b85-8aa2-a048f5f54044\") " pod="openstack/root-account-create-update-t55m7" Jan 29 06:53:59 crc kubenswrapper[4861]: I0129 06:53:59.140186 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-t55m7" Jan 29 06:53:59 crc kubenswrapper[4861]: I0129 06:53:59.195990 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-5j2wv" event={"ID":"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9","Type":"ContainerStarted","Data":"39678c0b309befc6eaca3a6e21adacef97433087953fc2955cd4481cf1a9ac40"} Jan 29 06:53:59 crc kubenswrapper[4861]: I0129 06:53:59.196095 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-jx6pp" Jan 29 06:53:59 crc kubenswrapper[4861]: I0129 06:53:59.317281 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-jx6pp"] Jan 29 06:53:59 crc kubenswrapper[4861]: I0129 06:53:59.340674 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-jx6pp"] Jan 29 06:53:59 crc kubenswrapper[4861]: I0129 06:53:59.356524 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 29 06:53:59 crc kubenswrapper[4861]: I0129 06:53:59.653773 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-t55m7"] Jan 29 06:53:59 crc kubenswrapper[4861]: W0129 06:53:59.660097 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d3b2bce_2a49_4b85_8aa2_a048f5f54044.slice/crio-91bb3ce6b89a492f3203b22dd497a913ae344ba320ea66eed99634abce364a13 WatchSource:0}: Error finding container 91bb3ce6b89a492f3203b22dd497a913ae344ba320ea66eed99634abce364a13: Status 404 returned error can't find the container with id 91bb3ce6b89a492f3203b22dd497a913ae344ba320ea66eed99634abce364a13 Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.059805 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-kb8t5"] Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.061640 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-kb8t5" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.073469 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-kb8t5"] Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.173541 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-abe6-account-create-update-45grp"] Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.175097 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-abe6-account-create-update-45grp" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.177507 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.183800 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-abe6-account-create-update-45grp"] Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.205551 4861 generic.go:334] "Generic (PLEG): container finished" podID="9d3b2bce-2a49-4b85-8aa2-a048f5f54044" containerID="a2c261703bc18ccdd7760e2b862c9a0b2f7a70eb7e01b0defc65c4d41b28a4a7" exitCode=0 Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.205668 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-t55m7" event={"ID":"9d3b2bce-2a49-4b85-8aa2-a048f5f54044","Type":"ContainerDied","Data":"a2c261703bc18ccdd7760e2b862c9a0b2f7a70eb7e01b0defc65c4d41b28a4a7"} Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.205720 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-t55m7" event={"ID":"9d3b2bce-2a49-4b85-8aa2-a048f5f54044","Type":"ContainerStarted","Data":"91bb3ce6b89a492f3203b22dd497a913ae344ba320ea66eed99634abce364a13"} Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.237895 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjxgx\" (UniqueName: \"kubernetes.io/projected/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-kube-api-access-fjxgx\") pod \"keystone-db-create-kb8t5\" (UID: \"70ddea03-eaa7-41ff-8bfe-b050ef7848b5\") " pod="openstack/keystone-db-create-kb8t5" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.238138 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-operator-scripts\") pod \"keystone-db-create-kb8t5\" (UID: \"70ddea03-eaa7-41ff-8bfe-b050ef7848b5\") " pod="openstack/keystone-db-create-kb8t5" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.339286 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27rcz\" (UniqueName: \"kubernetes.io/projected/4bc33551-a784-45a5-8184-ada61e659999-kube-api-access-27rcz\") pod \"keystone-abe6-account-create-update-45grp\" (UID: \"4bc33551-a784-45a5-8184-ada61e659999\") " pod="openstack/keystone-abe6-account-create-update-45grp" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.339627 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjxgx\" (UniqueName: \"kubernetes.io/projected/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-kube-api-access-fjxgx\") pod \"keystone-db-create-kb8t5\" (UID: \"70ddea03-eaa7-41ff-8bfe-b050ef7848b5\") " pod="openstack/keystone-db-create-kb8t5" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.339737 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-operator-scripts\") pod \"keystone-db-create-kb8t5\" (UID: \"70ddea03-eaa7-41ff-8bfe-b050ef7848b5\") " pod="openstack/keystone-db-create-kb8t5" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.339931 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/4bc33551-a784-45a5-8184-ada61e659999-operator-scripts\") pod \"keystone-abe6-account-create-update-45grp\" (UID: \"4bc33551-a784-45a5-8184-ada61e659999\") " pod="openstack/keystone-abe6-account-create-update-45grp" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.341796 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-operator-scripts\") pod \"keystone-db-create-kb8t5\" (UID: \"70ddea03-eaa7-41ff-8bfe-b050ef7848b5\") " pod="openstack/keystone-db-create-kb8t5" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.364516 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjxgx\" (UniqueName: \"kubernetes.io/projected/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-kube-api-access-fjxgx\") pod \"keystone-db-create-kb8t5\" (UID: \"70ddea03-eaa7-41ff-8bfe-b050ef7848b5\") " pod="openstack/keystone-db-create-kb8t5" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.419752 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-7lshn"] Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.421530 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-7lshn" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.424373 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-kb8t5" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.426664 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-7lshn"] Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.441628 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bc33551-a784-45a5-8184-ada61e659999-operator-scripts\") pod \"keystone-abe6-account-create-update-45grp\" (UID: \"4bc33551-a784-45a5-8184-ada61e659999\") " pod="openstack/keystone-abe6-account-create-update-45grp" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.441683 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27rcz\" (UniqueName: \"kubernetes.io/projected/4bc33551-a784-45a5-8184-ada61e659999-kube-api-access-27rcz\") pod \"keystone-abe6-account-create-update-45grp\" (UID: \"4bc33551-a784-45a5-8184-ada61e659999\") " pod="openstack/keystone-abe6-account-create-update-45grp" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.442525 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bc33551-a784-45a5-8184-ada61e659999-operator-scripts\") pod \"keystone-abe6-account-create-update-45grp\" (UID: \"4bc33551-a784-45a5-8184-ada61e659999\") " pod="openstack/keystone-abe6-account-create-update-45grp" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.459819 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27rcz\" (UniqueName: \"kubernetes.io/projected/4bc33551-a784-45a5-8184-ada61e659999-kube-api-access-27rcz\") pod \"keystone-abe6-account-create-update-45grp\" (UID: \"4bc33551-a784-45a5-8184-ada61e659999\") " pod="openstack/keystone-abe6-account-create-update-45grp" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.492722 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-abe6-account-create-update-45grp" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.512761 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-98fb-account-create-update-cwrzk"] Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.513825 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-98fb-account-create-update-cwrzk" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.518825 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.523655 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-98fb-account-create-update-cwrzk"] Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.543649 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqsmx\" (UniqueName: \"kubernetes.io/projected/b743a02f-e9d8-4580-a3fa-230bbfbfea83-kube-api-access-bqsmx\") pod \"placement-db-create-7lshn\" (UID: \"b743a02f-e9d8-4580-a3fa-230bbfbfea83\") " pod="openstack/placement-db-create-7lshn" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.543780 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b743a02f-e9d8-4580-a3fa-230bbfbfea83-operator-scripts\") pod \"placement-db-create-7lshn\" (UID: \"b743a02f-e9d8-4580-a3fa-230bbfbfea83\") " pod="openstack/placement-db-create-7lshn" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.629316 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.629409 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.629498 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.630535 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6286a50c0abd3320b1618a8f91d0446eb73b0dae9310f72e53e305c4914d0508"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.630603 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://6286a50c0abd3320b1618a8f91d0446eb73b0dae9310f72e53e305c4914d0508" gracePeriod=600 Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.645971 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-4vnb6\" (UniqueName: \"kubernetes.io/projected/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-kube-api-access-4vnb6\") pod \"placement-98fb-account-create-update-cwrzk\" (UID: \"46b65d05-b04a-4e60-b22f-d47aa0ef69e4\") " pod="openstack/placement-98fb-account-create-update-cwrzk" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.646018 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqsmx\" (UniqueName: \"kubernetes.io/projected/b743a02f-e9d8-4580-a3fa-230bbfbfea83-kube-api-access-bqsmx\") pod \"placement-db-create-7lshn\" (UID: \"b743a02f-e9d8-4580-a3fa-230bbfbfea83\") " pod="openstack/placement-db-create-7lshn" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.646046 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-operator-scripts\") pod \"placement-98fb-account-create-update-cwrzk\" (UID: \"46b65d05-b04a-4e60-b22f-d47aa0ef69e4\") " pod="openstack/placement-98fb-account-create-update-cwrzk" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.646542 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b743a02f-e9d8-4580-a3fa-230bbfbfea83-operator-scripts\") pod \"placement-db-create-7lshn\" (UID: \"b743a02f-e9d8-4580-a3fa-230bbfbfea83\") " pod="openstack/placement-db-create-7lshn" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.647377 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b743a02f-e9d8-4580-a3fa-230bbfbfea83-operator-scripts\") pod \"placement-db-create-7lshn\" (UID: \"b743a02f-e9d8-4580-a3fa-230bbfbfea83\") " pod="openstack/placement-db-create-7lshn" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.664611 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqsmx\" (UniqueName: \"kubernetes.io/projected/b743a02f-e9d8-4580-a3fa-230bbfbfea83-kube-api-access-bqsmx\") pod \"placement-db-create-7lshn\" (UID: \"b743a02f-e9d8-4580-a3fa-230bbfbfea83\") " pod="openstack/placement-db-create-7lshn" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.745672 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-7lshn" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.748587 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vnb6\" (UniqueName: \"kubernetes.io/projected/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-kube-api-access-4vnb6\") pod \"placement-98fb-account-create-update-cwrzk\" (UID: \"46b65d05-b04a-4e60-b22f-d47aa0ef69e4\") " pod="openstack/placement-98fb-account-create-update-cwrzk" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.748654 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-operator-scripts\") pod \"placement-98fb-account-create-update-cwrzk\" (UID: \"46b65d05-b04a-4e60-b22f-d47aa0ef69e4\") " pod="openstack/placement-98fb-account-create-update-cwrzk" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.749736 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-operator-scripts\") pod \"placement-98fb-account-create-update-cwrzk\" (UID: \"46b65d05-b04a-4e60-b22f-d47aa0ef69e4\") " pod="openstack/placement-98fb-account-create-update-cwrzk" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.769536 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vnb6\" (UniqueName: \"kubernetes.io/projected/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-kube-api-access-4vnb6\") pod \"placement-98fb-account-create-update-cwrzk\" (UID: \"46b65d05-b04a-4e60-b22f-d47aa0ef69e4\") " pod="openstack/placement-98fb-account-create-update-cwrzk" Jan 29 06:54:00 crc kubenswrapper[4861]: I0129 06:54:00.835273 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-98fb-account-create-update-cwrzk" Jan 29 06:54:01 crc kubenswrapper[4861]: I0129 06:54:01.127869 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0705d705-4857-4f55-a51a-0b9fce72c693" path="/var/lib/kubelet/pods/0705d705-4857-4f55-a51a-0b9fce72c693/volumes" Jan 29 06:54:01 crc kubenswrapper[4861]: I0129 06:54:01.215525 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="6286a50c0abd3320b1618a8f91d0446eb73b0dae9310f72e53e305c4914d0508" exitCode=0 Jan 29 06:54:01 crc kubenswrapper[4861]: I0129 06:54:01.215808 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"6286a50c0abd3320b1618a8f91d0446eb73b0dae9310f72e53e305c4914d0508"} Jan 29 06:54:01 crc kubenswrapper[4861]: I0129 06:54:01.215859 4861 scope.go:117] "RemoveContainer" containerID="a3e571fb457ea966d33ae87dfd58f64d47243c7a436da1c6aa743ed114c9efd5" Jan 29 06:54:01 crc kubenswrapper[4861]: I0129 06:54:01.463204 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:54:01 crc kubenswrapper[4861]: E0129 06:54:01.463476 4861 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 06:54:01 crc kubenswrapper[4861]: E0129 06:54:01.463526 4861 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 06:54:01 crc kubenswrapper[4861]: E0129 06:54:01.463604 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift podName:7d54030c-d725-4a6c-ad29-d84482378f20 nodeName:}" failed. No retries permitted until 2026-01-29 06:54:09.463564662 +0000 UTC m=+1141.135059219 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift") pod "swift-storage-0" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20") : configmap "swift-ring-files" not found Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.421902 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-t55m7" Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.592725 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pth9h\" (UniqueName: \"kubernetes.io/projected/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-kube-api-access-pth9h\") pod \"9d3b2bce-2a49-4b85-8aa2-a048f5f54044\" (UID: \"9d3b2bce-2a49-4b85-8aa2-a048f5f54044\") " Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.593439 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-operator-scripts\") pod \"9d3b2bce-2a49-4b85-8aa2-a048f5f54044\" (UID: \"9d3b2bce-2a49-4b85-8aa2-a048f5f54044\") " Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.594434 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9d3b2bce-2a49-4b85-8aa2-a048f5f54044" (UID: "9d3b2bce-2a49-4b85-8aa2-a048f5f54044"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.603134 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-kube-api-access-pth9h" (OuterVolumeSpecName: "kube-api-access-pth9h") pod "9d3b2bce-2a49-4b85-8aa2-a048f5f54044" (UID: "9d3b2bce-2a49-4b85-8aa2-a048f5f54044"). InnerVolumeSpecName "kube-api-access-pth9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.695397 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pth9h\" (UniqueName: \"kubernetes.io/projected/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-kube-api-access-pth9h\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.695431 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d3b2bce-2a49-4b85-8aa2-a048f5f54044-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.700535 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-kb8t5"] Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.852808 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-abe6-account-create-update-45grp"] Jan 29 06:54:02 crc kubenswrapper[4861]: W0129 06:54:02.857087 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4bc33551_a784_45a5_8184_ada61e659999.slice/crio-986ac49c5897c8827374ad8d1ba096a2d847ab28e8dd8447de693ac92fa5abb7 WatchSource:0}: Error finding container 986ac49c5897c8827374ad8d1ba096a2d847ab28e8dd8447de693ac92fa5abb7: Status 404 returned error can't find the container with id 986ac49c5897c8827374ad8d1ba096a2d847ab28e8dd8447de693ac92fa5abb7 Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.869461 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-98fb-account-create-update-cwrzk"] Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.890217 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 
06:54:02.971164 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-fvf8f"] Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.971607 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f" podUID="76403591-49dd-48f3-976f-34a9b6d7ba8a" containerName="dnsmasq-dns" containerID="cri-o://af43cd9a15bdcd0bf0800a6a9adb43727ef4cf877e39df92be7c761c11265621" gracePeriod=10 Jan 29 06:54:02 crc kubenswrapper[4861]: I0129 06:54:02.995807 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-7lshn"] Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.271397 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-abe6-account-create-update-45grp" event={"ID":"4bc33551-a784-45a5-8184-ada61e659999","Type":"ContainerStarted","Data":"db29cf3498e6242cc345136e8b4ef9c1ff58fae25025e65ba05baf083fa22586"} Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.271441 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-abe6-account-create-update-45grp" event={"ID":"4bc33551-a784-45a5-8184-ada61e659999","Type":"ContainerStarted","Data":"986ac49c5897c8827374ad8d1ba096a2d847ab28e8dd8447de693ac92fa5abb7"} Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.295481 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"85000e70e0f61206c55ed7e3495b90975c6a190d05beb488bbd436b08d076e87"} Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.298387 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-abe6-account-create-update-45grp" podStartSLOduration=3.29837129 podStartE2EDuration="3.29837129s" podCreationTimestamp="2026-01-29 06:54:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:03.295421435 +0000 UTC m=+1134.966915992" watchObservedRunningTime="2026-01-29 06:54:03.29837129 +0000 UTC m=+1134.969865847" Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.305687 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-t55m7" event={"ID":"9d3b2bce-2a49-4b85-8aa2-a048f5f54044","Type":"ContainerDied","Data":"91bb3ce6b89a492f3203b22dd497a913ae344ba320ea66eed99634abce364a13"} Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.305729 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91bb3ce6b89a492f3203b22dd497a913ae344ba320ea66eed99634abce364a13" Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.305789 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-t55m7" Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.315396 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-kb8t5" event={"ID":"70ddea03-eaa7-41ff-8bfe-b050ef7848b5","Type":"ContainerStarted","Data":"eb5b13bfbfc3b8b7eee2ed233fc25bde57155c21687160ab5efe54426a560771"} Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.315442 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-kb8t5" event={"ID":"70ddea03-eaa7-41ff-8bfe-b050ef7848b5","Type":"ContainerStarted","Data":"ec32749231be283f3b5cd8a918674a3ff15fab206996d9fb567570d5cfe52a4f"} Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.317660 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-7lshn" event={"ID":"b743a02f-e9d8-4580-a3fa-230bbfbfea83","Type":"ContainerStarted","Data":"0a6381b30a52746dc07e6a1f0239822352ac85f3ad3580dcdb9a50d1efe8a41c"} Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.325314 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-98fb-account-create-update-cwrzk" event={"ID":"46b65d05-b04a-4e60-b22f-d47aa0ef69e4","Type":"ContainerStarted","Data":"7438af59a13cf43dd170a4e1157286f11336dd7d53efd066618939d16c7a2b84"} Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.325353 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-98fb-account-create-update-cwrzk" event={"ID":"46b65d05-b04a-4e60-b22f-d47aa0ef69e4","Type":"ContainerStarted","Data":"5387c369dfc5e1d4e586c1f9ccd633eb72b72c44b1fab9e01301565fd13ba8e2"} Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.329264 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-5j2wv" event={"ID":"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9","Type":"ContainerStarted","Data":"f3570debc62991c7c1fa0d77b1dc28def8d437df67468235887383ae41cd48d3"} Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.338406 4861 generic.go:334] "Generic (PLEG): container finished" podID="76403591-49dd-48f3-976f-34a9b6d7ba8a" containerID="af43cd9a15bdcd0bf0800a6a9adb43727ef4cf877e39df92be7c761c11265621" exitCode=0 Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.338445 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f" event={"ID":"76403591-49dd-48f3-976f-34a9b6d7ba8a","Type":"ContainerDied","Data":"af43cd9a15bdcd0bf0800a6a9adb43727ef4cf877e39df92be7c761c11265621"} Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.361313 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-5j2wv" podStartSLOduration=2.481236346 podStartE2EDuration="6.361295318s" podCreationTimestamp="2026-01-29 06:53:57 +0000 UTC" firstStartedPulling="2026-01-29 06:53:58.525599705 +0000 UTC m=+1130.197094302" lastFinishedPulling="2026-01-29 06:54:02.405658717 +0000 UTC m=+1134.077153274" observedRunningTime="2026-01-29 06:54:03.35238024 +0000 UTC m=+1135.023874817" watchObservedRunningTime="2026-01-29 06:54:03.361295318 +0000 UTC m=+1135.032789865" Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.374647 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-kb8t5" podStartSLOduration=3.374616128 podStartE2EDuration="3.374616128s" podCreationTimestamp="2026-01-29 06:54:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:03.366540122 +0000 UTC m=+1135.038034689" watchObservedRunningTime="2026-01-29 06:54:03.374616128 +0000 UTC m=+1135.046110685" Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.383929 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-98fb-account-create-update-cwrzk" podStartSLOduration=3.3839227259999998 podStartE2EDuration="3.383922726s" podCreationTimestamp="2026-01-29 06:54:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:03.382087489 +0000 UTC m=+1135.053582046" watchObservedRunningTime="2026-01-29 06:54:03.383922726 +0000 UTC m=+1135.055417283" Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.770441 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f" Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.915404 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-dns-svc\") pod \"76403591-49dd-48f3-976f-34a9b6d7ba8a\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.915619 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-config\") pod \"76403591-49dd-48f3-976f-34a9b6d7ba8a\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.915806 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnntw\" (UniqueName: \"kubernetes.io/projected/76403591-49dd-48f3-976f-34a9b6d7ba8a-kube-api-access-gnntw\") pod \"76403591-49dd-48f3-976f-34a9b6d7ba8a\" (UID: \"76403591-49dd-48f3-976f-34a9b6d7ba8a\") " Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.937245 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76403591-49dd-48f3-976f-34a9b6d7ba8a-kube-api-access-gnntw" (OuterVolumeSpecName: "kube-api-access-gnntw") pod "76403591-49dd-48f3-976f-34a9b6d7ba8a" (UID: "76403591-49dd-48f3-976f-34a9b6d7ba8a"). InnerVolumeSpecName "kube-api-access-gnntw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.980270 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "76403591-49dd-48f3-976f-34a9b6d7ba8a" (UID: "76403591-49dd-48f3-976f-34a9b6d7ba8a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:03 crc kubenswrapper[4861]: I0129 06:54:03.981504 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-config" (OuterVolumeSpecName: "config") pod "76403591-49dd-48f3-976f-34a9b6d7ba8a" (UID: "76403591-49dd-48f3-976f-34a9b6d7ba8a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.018914 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.018948 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnntw\" (UniqueName: \"kubernetes.io/projected/76403591-49dd-48f3-976f-34a9b6d7ba8a-kube-api-access-gnntw\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.019114 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/76403591-49dd-48f3-976f-34a9b6d7ba8a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.347128 4861 generic.go:334] "Generic (PLEG): container finished" podID="46b65d05-b04a-4e60-b22f-d47aa0ef69e4" containerID="7438af59a13cf43dd170a4e1157286f11336dd7d53efd066618939d16c7a2b84" exitCode=0 Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.347385 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-98fb-account-create-update-cwrzk" event={"ID":"46b65d05-b04a-4e60-b22f-d47aa0ef69e4","Type":"ContainerDied","Data":"7438af59a13cf43dd170a4e1157286f11336dd7d53efd066618939d16c7a2b84"} Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.349760 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f" event={"ID":"76403591-49dd-48f3-976f-34a9b6d7ba8a","Type":"ContainerDied","Data":"1099740a029dfcee3f8ce345007a41584cfd2a73bcfb51d527d96d383fed1517"} Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.349796 4861 scope.go:117] "RemoveContainer" containerID="af43cd9a15bdcd0bf0800a6a9adb43727ef4cf877e39df92be7c761c11265621" Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.350030 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-fvf8f" Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.351323 4861 generic.go:334] "Generic (PLEG): container finished" podID="4bc33551-a784-45a5-8184-ada61e659999" containerID="db29cf3498e6242cc345136e8b4ef9c1ff58fae25025e65ba05baf083fa22586" exitCode=0 Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.351379 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-abe6-account-create-update-45grp" event={"ID":"4bc33551-a784-45a5-8184-ada61e659999","Type":"ContainerDied","Data":"db29cf3498e6242cc345136e8b4ef9c1ff58fae25025e65ba05baf083fa22586"} Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.354001 4861 generic.go:334] "Generic (PLEG): container finished" podID="70ddea03-eaa7-41ff-8bfe-b050ef7848b5" containerID="eb5b13bfbfc3b8b7eee2ed233fc25bde57155c21687160ab5efe54426a560771" exitCode=0 Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.354096 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-kb8t5" event={"ID":"70ddea03-eaa7-41ff-8bfe-b050ef7848b5","Type":"ContainerDied","Data":"eb5b13bfbfc3b8b7eee2ed233fc25bde57155c21687160ab5efe54426a560771"} Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.356777 4861 generic.go:334] "Generic (PLEG): container finished" podID="b743a02f-e9d8-4580-a3fa-230bbfbfea83" containerID="44cae61274ae498ec0e36b040cd3c6a823e229c9ed4442ae55a7ad53836e9dd2" exitCode=0 Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.358244 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-7lshn" event={"ID":"b743a02f-e9d8-4580-a3fa-230bbfbfea83","Type":"ContainerDied","Data":"44cae61274ae498ec0e36b040cd3c6a823e229c9ed4442ae55a7ad53836e9dd2"} Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.367315 4861 scope.go:117] "RemoveContainer" containerID="71586b55f6fbb29ba2e6038dd24e6ed3fa5ed8e5a0decb745ccf4808995a47c4" Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.458095 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-fvf8f"] Jan 29 06:54:04 crc kubenswrapper[4861]: I0129 06:54:04.464554 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-fvf8f"] Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.130684 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76403591-49dd-48f3-976f-34a9b6d7ba8a" path="/var/lib/kubelet/pods/76403591-49dd-48f3-976f-34a9b6d7ba8a/volumes" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.372405 4861 generic.go:334] "Generic (PLEG): container finished" podID="5966cedc-8ab5-4390-906b-c5ac39333e09" containerID="093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6" exitCode=0 Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.372516 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5966cedc-8ab5-4390-906b-c5ac39333e09","Type":"ContainerDied","Data":"093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6"} Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.375063 4861 generic.go:334] "Generic (PLEG): container finished" podID="3b8b1385-123a-4b60-af39-82d6492a65c2" containerID="2808f05e16652e14e080ca41ff8920b6abdf36850a94e302e37f9f0b96a4b421" exitCode=0 Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.375331 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"3b8b1385-123a-4b60-af39-82d6492a65c2","Type":"ContainerDied","Data":"2808f05e16652e14e080ca41ff8920b6abdf36850a94e302e37f9f0b96a4b421"} Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.712145 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-98fb-account-create-update-cwrzk" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.731120 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-m775k"] Jan 29 06:54:05 crc kubenswrapper[4861]: E0129 06:54:05.731749 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76403591-49dd-48f3-976f-34a9b6d7ba8a" containerName="dnsmasq-dns" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.731788 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="76403591-49dd-48f3-976f-34a9b6d7ba8a" containerName="dnsmasq-dns" Jan 29 06:54:05 crc kubenswrapper[4861]: E0129 06:54:05.731831 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d3b2bce-2a49-4b85-8aa2-a048f5f54044" containerName="mariadb-account-create-update" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.731838 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d3b2bce-2a49-4b85-8aa2-a048f5f54044" containerName="mariadb-account-create-update" Jan 29 06:54:05 crc kubenswrapper[4861]: E0129 06:54:05.731850 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46b65d05-b04a-4e60-b22f-d47aa0ef69e4" containerName="mariadb-account-create-update" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.731870 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="46b65d05-b04a-4e60-b22f-d47aa0ef69e4" containerName="mariadb-account-create-update" Jan 29 06:54:05 crc kubenswrapper[4861]: E0129 06:54:05.731885 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76403591-49dd-48f3-976f-34a9b6d7ba8a" containerName="init" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.731891 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="76403591-49dd-48f3-976f-34a9b6d7ba8a" containerName="init" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.732134 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d3b2bce-2a49-4b85-8aa2-a048f5f54044" containerName="mariadb-account-create-update" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.732165 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="46b65d05-b04a-4e60-b22f-d47aa0ef69e4" containerName="mariadb-account-create-update" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.732173 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="76403591-49dd-48f3-976f-34a9b6d7ba8a" containerName="dnsmasq-dns" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.732928 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-m775k" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.759917 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-m775k"] Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.855463 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-operator-scripts\") pod \"46b65d05-b04a-4e60-b22f-d47aa0ef69e4\" (UID: \"46b65d05-b04a-4e60-b22f-d47aa0ef69e4\") " Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.855961 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vnb6\" (UniqueName: \"kubernetes.io/projected/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-kube-api-access-4vnb6\") pod \"46b65d05-b04a-4e60-b22f-d47aa0ef69e4\" (UID: \"46b65d05-b04a-4e60-b22f-d47aa0ef69e4\") " Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.856276 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-operator-scripts\") pod \"glance-db-create-m775k\" (UID: \"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110\") " pod="openstack/glance-db-create-m775k" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.856515 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8mzr\" (UniqueName: \"kubernetes.io/projected/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-kube-api-access-s8mzr\") pod \"glance-db-create-m775k\" (UID: \"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110\") " pod="openstack/glance-db-create-m775k" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.858631 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "46b65d05-b04a-4e60-b22f-d47aa0ef69e4" (UID: "46b65d05-b04a-4e60-b22f-d47aa0ef69e4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.870203 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-kube-api-access-4vnb6" (OuterVolumeSpecName: "kube-api-access-4vnb6") pod "46b65d05-b04a-4e60-b22f-d47aa0ef69e4" (UID: "46b65d05-b04a-4e60-b22f-d47aa0ef69e4"). InnerVolumeSpecName "kube-api-access-4vnb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.870498 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-c008-account-create-update-7xcsr"] Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.871714 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-c008-account-create-update-7xcsr" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.877036 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-c008-account-create-update-7xcsr"] Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.878700 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.878891 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-kb8t5" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.891213 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-7lshn" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.921541 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-abe6-account-create-update-45grp" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.970505 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqsmx\" (UniqueName: \"kubernetes.io/projected/b743a02f-e9d8-4580-a3fa-230bbfbfea83-kube-api-access-bqsmx\") pod \"b743a02f-e9d8-4580-a3fa-230bbfbfea83\" (UID: \"b743a02f-e9d8-4580-a3fa-230bbfbfea83\") " Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.970559 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-operator-scripts\") pod \"70ddea03-eaa7-41ff-8bfe-b050ef7848b5\" (UID: \"70ddea03-eaa7-41ff-8bfe-b050ef7848b5\") " Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.970582 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b743a02f-e9d8-4580-a3fa-230bbfbfea83-operator-scripts\") pod \"b743a02f-e9d8-4580-a3fa-230bbfbfea83\" (UID: \"b743a02f-e9d8-4580-a3fa-230bbfbfea83\") " Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.970651 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjxgx\" (UniqueName: \"kubernetes.io/projected/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-kube-api-access-fjxgx\") pod \"70ddea03-eaa7-41ff-8bfe-b050ef7848b5\" (UID: \"70ddea03-eaa7-41ff-8bfe-b050ef7848b5\") " Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.972752 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhk7t\" (UniqueName: \"kubernetes.io/projected/27c095ab-2da0-40f5-b361-e40819c7b3aa-kube-api-access-nhk7t\") pod \"glance-c008-account-create-update-7xcsr\" (UID: \"27c095ab-2da0-40f5-b361-e40819c7b3aa\") " pod="openstack/glance-c008-account-create-update-7xcsr" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.972932 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8mzr\" (UniqueName: \"kubernetes.io/projected/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-kube-api-access-s8mzr\") pod \"glance-db-create-m775k\" (UID: \"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110\") " pod="openstack/glance-db-create-m775k" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.972988 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-operator-scripts\") pod \"glance-db-create-m775k\" (UID: \"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110\") " pod="openstack/glance-db-create-m775k" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.973057 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27c095ab-2da0-40f5-b361-e40819c7b3aa-operator-scripts\") pod \"glance-c008-account-create-update-7xcsr\" (UID: \"27c095ab-2da0-40f5-b361-e40819c7b3aa\") " 
pod="openstack/glance-c008-account-create-update-7xcsr" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.973134 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vnb6\" (UniqueName: \"kubernetes.io/projected/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-kube-api-access-4vnb6\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.973150 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46b65d05-b04a-4e60-b22f-d47aa0ef69e4-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.973984 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "70ddea03-eaa7-41ff-8bfe-b050ef7848b5" (UID: "70ddea03-eaa7-41ff-8bfe-b050ef7848b5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.974469 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b743a02f-e9d8-4580-a3fa-230bbfbfea83-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b743a02f-e9d8-4580-a3fa-230bbfbfea83" (UID: "b743a02f-e9d8-4580-a3fa-230bbfbfea83"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.974691 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-operator-scripts\") pod \"glance-db-create-m775k\" (UID: \"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110\") " pod="openstack/glance-db-create-m775k" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.976500 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b743a02f-e9d8-4580-a3fa-230bbfbfea83-kube-api-access-bqsmx" (OuterVolumeSpecName: "kube-api-access-bqsmx") pod "b743a02f-e9d8-4580-a3fa-230bbfbfea83" (UID: "b743a02f-e9d8-4580-a3fa-230bbfbfea83"). InnerVolumeSpecName "kube-api-access-bqsmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.977136 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-kube-api-access-fjxgx" (OuterVolumeSpecName: "kube-api-access-fjxgx") pod "70ddea03-eaa7-41ff-8bfe-b050ef7848b5" (UID: "70ddea03-eaa7-41ff-8bfe-b050ef7848b5"). InnerVolumeSpecName "kube-api-access-fjxgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:05 crc kubenswrapper[4861]: I0129 06:54:05.990735 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8mzr\" (UniqueName: \"kubernetes.io/projected/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-kube-api-access-s8mzr\") pod \"glance-db-create-m775k\" (UID: \"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110\") " pod="openstack/glance-db-create-m775k" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.068275 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-m775k" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.074370 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bc33551-a784-45a5-8184-ada61e659999-operator-scripts\") pod \"4bc33551-a784-45a5-8184-ada61e659999\" (UID: \"4bc33551-a784-45a5-8184-ada61e659999\") " Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.074593 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27rcz\" (UniqueName: \"kubernetes.io/projected/4bc33551-a784-45a5-8184-ada61e659999-kube-api-access-27rcz\") pod \"4bc33551-a784-45a5-8184-ada61e659999\" (UID: \"4bc33551-a784-45a5-8184-ada61e659999\") " Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.074838 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27c095ab-2da0-40f5-b361-e40819c7b3aa-operator-scripts\") pod \"glance-c008-account-create-update-7xcsr\" (UID: \"27c095ab-2da0-40f5-b361-e40819c7b3aa\") " pod="openstack/glance-c008-account-create-update-7xcsr" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.074946 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhk7t\" (UniqueName: \"kubernetes.io/projected/27c095ab-2da0-40f5-b361-e40819c7b3aa-kube-api-access-nhk7t\") pod \"glance-c008-account-create-update-7xcsr\" (UID: \"27c095ab-2da0-40f5-b361-e40819c7b3aa\") " pod="openstack/glance-c008-account-create-update-7xcsr" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.075136 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqsmx\" (UniqueName: \"kubernetes.io/projected/b743a02f-e9d8-4580-a3fa-230bbfbfea83-kube-api-access-bqsmx\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.075199 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.075276 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b743a02f-e9d8-4580-a3fa-230bbfbfea83-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.075335 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjxgx\" (UniqueName: \"kubernetes.io/projected/70ddea03-eaa7-41ff-8bfe-b050ef7848b5-kube-api-access-fjxgx\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.080102 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bc33551-a784-45a5-8184-ada61e659999-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4bc33551-a784-45a5-8184-ada61e659999" (UID: "4bc33551-a784-45a5-8184-ada61e659999"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.080328 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bc33551-a784-45a5-8184-ada61e659999-kube-api-access-27rcz" (OuterVolumeSpecName: "kube-api-access-27rcz") pod "4bc33551-a784-45a5-8184-ada61e659999" (UID: "4bc33551-a784-45a5-8184-ada61e659999"). 
InnerVolumeSpecName "kube-api-access-27rcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.081638 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27c095ab-2da0-40f5-b361-e40819c7b3aa-operator-scripts\") pod \"glance-c008-account-create-update-7xcsr\" (UID: \"27c095ab-2da0-40f5-b361-e40819c7b3aa\") " pod="openstack/glance-c008-account-create-update-7xcsr" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.106193 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhk7t\" (UniqueName: \"kubernetes.io/projected/27c095ab-2da0-40f5-b361-e40819c7b3aa-kube-api-access-nhk7t\") pod \"glance-c008-account-create-update-7xcsr\" (UID: \"27c095ab-2da0-40f5-b361-e40819c7b3aa\") " pod="openstack/glance-c008-account-create-update-7xcsr" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.176943 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bc33551-a784-45a5-8184-ada61e659999-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.176963 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27rcz\" (UniqueName: \"kubernetes.io/projected/4bc33551-a784-45a5-8184-ada61e659999-kube-api-access-27rcz\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.221536 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-c008-account-create-update-7xcsr" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.385591 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b8b1385-123a-4b60-af39-82d6492a65c2","Type":"ContainerStarted","Data":"df85d7b79b6e3d17ea7765b219c520d147b988eede5ce5119c6fe36e62177544"} Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.387171 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.389925 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-98fb-account-create-update-cwrzk" event={"ID":"46b65d05-b04a-4e60-b22f-d47aa0ef69e4","Type":"ContainerDied","Data":"5387c369dfc5e1d4e586c1f9ccd633eb72b72c44b1fab9e01301565fd13ba8e2"} Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.389956 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5387c369dfc5e1d4e586c1f9ccd633eb72b72c44b1fab9e01301565fd13ba8e2" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.390026 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-98fb-account-create-update-cwrzk" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.401429 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-abe6-account-create-update-45grp" event={"ID":"4bc33551-a784-45a5-8184-ada61e659999","Type":"ContainerDied","Data":"986ac49c5897c8827374ad8d1ba096a2d847ab28e8dd8447de693ac92fa5abb7"} Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.401472 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="986ac49c5897c8827374ad8d1ba096a2d847ab28e8dd8447de693ac92fa5abb7" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.401622 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-abe6-account-create-update-45grp" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.404388 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5966cedc-8ab5-4390-906b-c5ac39333e09","Type":"ContainerStarted","Data":"b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777"} Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.407292 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.411124 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-kb8t5" event={"ID":"70ddea03-eaa7-41ff-8bfe-b050ef7848b5","Type":"ContainerDied","Data":"ec32749231be283f3b5cd8a918674a3ff15fab206996d9fb567570d5cfe52a4f"} Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.411159 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec32749231be283f3b5cd8a918674a3ff15fab206996d9fb567570d5cfe52a4f" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.411355 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-kb8t5" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.427873 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.631117785 podStartE2EDuration="52.427846769s" podCreationTimestamp="2026-01-29 06:53:14 +0000 UTC" firstStartedPulling="2026-01-29 06:53:16.599906954 +0000 UTC m=+1088.271401511" lastFinishedPulling="2026-01-29 06:53:31.396635938 +0000 UTC m=+1103.068130495" observedRunningTime="2026-01-29 06:54:06.408927816 +0000 UTC m=+1138.080422403" watchObservedRunningTime="2026-01-29 06:54:06.427846769 +0000 UTC m=+1138.099341326" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.433510 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-7lshn" event={"ID":"b743a02f-e9d8-4580-a3fa-230bbfbfea83","Type":"ContainerDied","Data":"0a6381b30a52746dc07e6a1f0239822352ac85f3ad3580dcdb9a50d1efe8a41c"} Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.433549 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a6381b30a52746dc07e6a1f0239822352ac85f3ad3580dcdb9a50d1efe8a41c" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.433603 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-7lshn" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.441491 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=51.441467837 podStartE2EDuration="51.441467837s" podCreationTimestamp="2026-01-29 06:53:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:06.438332097 +0000 UTC m=+1138.109826664" watchObservedRunningTime="2026-01-29 06:54:06.441467837 +0000 UTC m=+1138.112962394" Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.531475 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-m775k"] Jan 29 06:54:06 crc kubenswrapper[4861]: I0129 06:54:06.722659 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-c008-account-create-update-7xcsr"] Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.306755 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-t55m7"] Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.316320 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-t55m7"] Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.387057 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-n8bb6"] Jan 29 06:54:07 crc kubenswrapper[4861]: E0129 06:54:07.387499 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bc33551-a784-45a5-8184-ada61e659999" containerName="mariadb-account-create-update" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.387528 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bc33551-a784-45a5-8184-ada61e659999" containerName="mariadb-account-create-update" Jan 29 06:54:07 crc kubenswrapper[4861]: E0129 06:54:07.387561 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70ddea03-eaa7-41ff-8bfe-b050ef7848b5" containerName="mariadb-database-create" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.387572 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="70ddea03-eaa7-41ff-8bfe-b050ef7848b5" containerName="mariadb-database-create" Jan 29 06:54:07 crc kubenswrapper[4861]: E0129 06:54:07.387598 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b743a02f-e9d8-4580-a3fa-230bbfbfea83" containerName="mariadb-database-create" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.387607 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b743a02f-e9d8-4580-a3fa-230bbfbfea83" containerName="mariadb-database-create" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.387804 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b743a02f-e9d8-4580-a3fa-230bbfbfea83" containerName="mariadb-database-create" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.387832 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bc33551-a784-45a5-8184-ada61e659999" containerName="mariadb-account-create-update" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.387849 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="70ddea03-eaa7-41ff-8bfe-b050ef7848b5" containerName="mariadb-database-create" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.388494 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-n8bb6" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.390423 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.405751 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-n8bb6"] Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.441816 4861 generic.go:334] "Generic (PLEG): container finished" podID="081e4fcd-2dd7-4e2b-b276-28a0ec4f7110" containerID="4156976b77fcc59e40ba688e6dad758e650f38eff0bf2ea70256a0007e7bb53f" exitCode=0 Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.441869 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-m775k" event={"ID":"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110","Type":"ContainerDied","Data":"4156976b77fcc59e40ba688e6dad758e650f38eff0bf2ea70256a0007e7bb53f"} Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.441891 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-m775k" event={"ID":"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110","Type":"ContainerStarted","Data":"5ef962f5ac7e37ee7a2e2018010f622f49b9dfd7e0a94289559338d6cdb35784"} Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.443723 4861 generic.go:334] "Generic (PLEG): container finished" podID="27c095ab-2da0-40f5-b361-e40819c7b3aa" containerID="cb513fa4a3eb6b1c3c04f73daa8fa7574de85c7d15fbf53dfe97301e93d37653" exitCode=0 Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.444470 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c008-account-create-update-7xcsr" event={"ID":"27c095ab-2da0-40f5-b361-e40819c7b3aa","Type":"ContainerDied","Data":"cb513fa4a3eb6b1c3c04f73daa8fa7574de85c7d15fbf53dfe97301e93d37653"} Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.444490 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c008-account-create-update-7xcsr" event={"ID":"27c095ab-2da0-40f5-b361-e40819c7b3aa","Type":"ContainerStarted","Data":"38352da22a953382237e07cd1b64049194a8d11ae826d1c71dd905e5543611af"} Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.497918 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbzc2\" (UniqueName: \"kubernetes.io/projected/b379c23e-2bef-4aa8-b656-e6c152261562-kube-api-access-zbzc2\") pod \"root-account-create-update-n8bb6\" (UID: \"b379c23e-2bef-4aa8-b656-e6c152261562\") " pod="openstack/root-account-create-update-n8bb6" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.499154 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b379c23e-2bef-4aa8-b656-e6c152261562-operator-scripts\") pod \"root-account-create-update-n8bb6\" (UID: \"b379c23e-2bef-4aa8-b656-e6c152261562\") " pod="openstack/root-account-create-update-n8bb6" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.601245 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbzc2\" (UniqueName: \"kubernetes.io/projected/b379c23e-2bef-4aa8-b656-e6c152261562-kube-api-access-zbzc2\") pod \"root-account-create-update-n8bb6\" (UID: \"b379c23e-2bef-4aa8-b656-e6c152261562\") " pod="openstack/root-account-create-update-n8bb6" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.601420 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b379c23e-2bef-4aa8-b656-e6c152261562-operator-scripts\") pod \"root-account-create-update-n8bb6\" (UID: \"b379c23e-2bef-4aa8-b656-e6c152261562\") " pod="openstack/root-account-create-update-n8bb6" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.602321 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b379c23e-2bef-4aa8-b656-e6c152261562-operator-scripts\") pod \"root-account-create-update-n8bb6\" (UID: \"b379c23e-2bef-4aa8-b656-e6c152261562\") " pod="openstack/root-account-create-update-n8bb6" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.637130 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbzc2\" (UniqueName: \"kubernetes.io/projected/b379c23e-2bef-4aa8-b656-e6c152261562-kube-api-access-zbzc2\") pod \"root-account-create-update-n8bb6\" (UID: \"b379c23e-2bef-4aa8-b656-e6c152261562\") " pod="openstack/root-account-create-update-n8bb6" Jan 29 06:54:07 crc kubenswrapper[4861]: I0129 06:54:07.721002 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-n8bb6" Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.047982 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-n8bb6"] Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.455490 4861 generic.go:334] "Generic (PLEG): container finished" podID="b379c23e-2bef-4aa8-b656-e6c152261562" containerID="2edde80755c98a48c8d503470908a51b138482e45d8b8affe30999cf1fe693fe" exitCode=0 Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.455681 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-n8bb6" event={"ID":"b379c23e-2bef-4aa8-b656-e6c152261562","Type":"ContainerDied","Data":"2edde80755c98a48c8d503470908a51b138482e45d8b8affe30999cf1fe693fe"} Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.456067 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-n8bb6" event={"ID":"b379c23e-2bef-4aa8-b656-e6c152261562","Type":"ContainerStarted","Data":"a16695c0a8fe6b307c07ae5675b286ed61621b11c3b3648a0f37583be630f50e"} Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.872443 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-m775k" Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.879687 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-c008-account-create-update-7xcsr" Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.927375 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8mzr\" (UniqueName: \"kubernetes.io/projected/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-kube-api-access-s8mzr\") pod \"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110\" (UID: \"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110\") " Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.927752 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhk7t\" (UniqueName: \"kubernetes.io/projected/27c095ab-2da0-40f5-b361-e40819c7b3aa-kube-api-access-nhk7t\") pod \"27c095ab-2da0-40f5-b361-e40819c7b3aa\" (UID: \"27c095ab-2da0-40f5-b361-e40819c7b3aa\") " Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.927913 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-operator-scripts\") pod \"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110\" (UID: \"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110\") " Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.927976 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27c095ab-2da0-40f5-b361-e40819c7b3aa-operator-scripts\") pod \"27c095ab-2da0-40f5-b361-e40819c7b3aa\" (UID: \"27c095ab-2da0-40f5-b361-e40819c7b3aa\") " Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.928584 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "081e4fcd-2dd7-4e2b-b276-28a0ec4f7110" (UID: "081e4fcd-2dd7-4e2b-b276-28a0ec4f7110"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.928693 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27c095ab-2da0-40f5-b361-e40819c7b3aa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "27c095ab-2da0-40f5-b361-e40819c7b3aa" (UID: "27c095ab-2da0-40f5-b361-e40819c7b3aa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.933207 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-kube-api-access-s8mzr" (OuterVolumeSpecName: "kube-api-access-s8mzr") pod "081e4fcd-2dd7-4e2b-b276-28a0ec4f7110" (UID: "081e4fcd-2dd7-4e2b-b276-28a0ec4f7110"). InnerVolumeSpecName "kube-api-access-s8mzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:08 crc kubenswrapper[4861]: I0129 06:54:08.933236 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27c095ab-2da0-40f5-b361-e40819c7b3aa-kube-api-access-nhk7t" (OuterVolumeSpecName: "kube-api-access-nhk7t") pod "27c095ab-2da0-40f5-b361-e40819c7b3aa" (UID: "27c095ab-2da0-40f5-b361-e40819c7b3aa"). InnerVolumeSpecName "kube-api-access-nhk7t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.030334 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.030374 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27c095ab-2da0-40f5-b361-e40819c7b3aa-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.030384 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8mzr\" (UniqueName: \"kubernetes.io/projected/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110-kube-api-access-s8mzr\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.030394 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhk7t\" (UniqueName: \"kubernetes.io/projected/27c095ab-2da0-40f5-b361-e40819c7b3aa-kube-api-access-nhk7t\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.130417 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d3b2bce-2a49-4b85-8aa2-a048f5f54044" path="/var/lib/kubelet/pods/9d3b2bce-2a49-4b85-8aa2-a048f5f54044/volumes" Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.463005 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c008-account-create-update-7xcsr" event={"ID":"27c095ab-2da0-40f5-b361-e40819c7b3aa","Type":"ContainerDied","Data":"38352da22a953382237e07cd1b64049194a8d11ae826d1c71dd905e5543611af"} Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.463048 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="38352da22a953382237e07cd1b64049194a8d11ae826d1c71dd905e5543611af" Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.464135 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-c008-account-create-update-7xcsr" Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.464267 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-m775k" event={"ID":"081e4fcd-2dd7-4e2b-b276-28a0ec4f7110","Type":"ContainerDied","Data":"5ef962f5ac7e37ee7a2e2018010f622f49b9dfd7e0a94289559338d6cdb35784"} Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.464305 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-m775k" Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.464309 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ef962f5ac7e37ee7a2e2018010f622f49b9dfd7e0a94289559338d6cdb35784" Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.539439 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0" Jan 29 06:54:09 crc kubenswrapper[4861]: E0129 06:54:09.539748 4861 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 06:54:09 crc kubenswrapper[4861]: E0129 06:54:09.539770 4861 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 06:54:09 crc kubenswrapper[4861]: E0129 06:54:09.539825 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift podName:7d54030c-d725-4a6c-ad29-d84482378f20 nodeName:}" failed. No retries permitted until 2026-01-29 06:54:25.539806991 +0000 UTC m=+1157.211301548 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift") pod "swift-storage-0" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20") : configmap "swift-ring-files" not found Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.745936 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-n8bb6" Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.847497 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbzc2\" (UniqueName: \"kubernetes.io/projected/b379c23e-2bef-4aa8-b656-e6c152261562-kube-api-access-zbzc2\") pod \"b379c23e-2bef-4aa8-b656-e6c152261562\" (UID: \"b379c23e-2bef-4aa8-b656-e6c152261562\") " Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.847914 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b379c23e-2bef-4aa8-b656-e6c152261562-operator-scripts\") pod \"b379c23e-2bef-4aa8-b656-e6c152261562\" (UID: \"b379c23e-2bef-4aa8-b656-e6c152261562\") " Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.848295 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b379c23e-2bef-4aa8-b656-e6c152261562-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b379c23e-2bef-4aa8-b656-e6c152261562" (UID: "b379c23e-2bef-4aa8-b656-e6c152261562"). InnerVolumeSpecName "operator-scripts". 
Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.848977 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b379c23e-2bef-4aa8-b656-e6c152261562-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.851820 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b379c23e-2bef-4aa8-b656-e6c152261562-kube-api-access-zbzc2" (OuterVolumeSpecName: "kube-api-access-zbzc2") pod "b379c23e-2bef-4aa8-b656-e6c152261562" (UID: "b379c23e-2bef-4aa8-b656-e6c152261562"). InnerVolumeSpecName "kube-api-access-zbzc2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:54:09 crc kubenswrapper[4861]: I0129 06:54:09.951257 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbzc2\" (UniqueName: \"kubernetes.io/projected/b379c23e-2bef-4aa8-b656-e6c152261562-kube-api-access-zbzc2\") on node \"crc\" DevicePath \"\""
Jan 29 06:54:10 crc kubenswrapper[4861]: I0129 06:54:10.476741 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-n8bb6" event={"ID":"b379c23e-2bef-4aa8-b656-e6c152261562","Type":"ContainerDied","Data":"a16695c0a8fe6b307c07ae5675b286ed61621b11c3b3648a0f37583be630f50e"}
Jan 29 06:54:10 crc kubenswrapper[4861]: I0129 06:54:10.476782 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a16695c0a8fe6b307c07ae5675b286ed61621b11c3b3648a0f37583be630f50e"
Jan 29 06:54:10 crc kubenswrapper[4861]: I0129 06:54:10.476847 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-n8bb6"
Jan 29 06:54:10 crc kubenswrapper[4861]: I0129 06:54:10.486419 4861 generic.go:334] "Generic (PLEG): container finished" podID="f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" containerID="f3570debc62991c7c1fa0d77b1dc28def8d437df67468235887383ae41cd48d3" exitCode=0
Jan 29 06:54:10 crc kubenswrapper[4861]: I0129 06:54:10.486485 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-5j2wv" event={"ID":"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9","Type":"ContainerDied","Data":"f3570debc62991c7c1fa0d77b1dc28def8d437df67468235887383ae41cd48d3"}
Jan 29 06:54:10 crc kubenswrapper[4861]: I0129 06:54:10.962805 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-z5wvn" podUID="5b52afb6-32de-4f14-9663-adeec08b4fad" containerName="ovn-controller" probeResult="failure" output=<
Jan 29 06:54:10 crc kubenswrapper[4861]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 29 06:54:10 crc kubenswrapper[4861]: >
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.110198 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-psncj"]
Jan 29 06:54:11 crc kubenswrapper[4861]: E0129 06:54:11.110665 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="081e4fcd-2dd7-4e2b-b276-28a0ec4f7110" containerName="mariadb-database-create"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.110687 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="081e4fcd-2dd7-4e2b-b276-28a0ec4f7110" containerName="mariadb-database-create"
Jan 29 06:54:11 crc kubenswrapper[4861]: E0129 06:54:11.110708 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b379c23e-2bef-4aa8-b656-e6c152261562" containerName="mariadb-account-create-update"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.110717 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b379c23e-2bef-4aa8-b656-e6c152261562" containerName="mariadb-account-create-update"
Jan 29 06:54:11 crc kubenswrapper[4861]: E0129 06:54:11.110741 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27c095ab-2da0-40f5-b361-e40819c7b3aa" containerName="mariadb-account-create-update"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.110751 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="27c095ab-2da0-40f5-b361-e40819c7b3aa" containerName="mariadb-account-create-update"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.110922 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b379c23e-2bef-4aa8-b656-e6c152261562" containerName="mariadb-account-create-update"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.110951 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="27c095ab-2da0-40f5-b361-e40819c7b3aa" containerName="mariadb-account-create-update"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.110960 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="081e4fcd-2dd7-4e2b-b276-28a0ec4f7110" containerName="mariadb-database-create"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.111550 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.115365 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.121165 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-nnz82"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.140333 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-psncj"]
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.239199 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-combined-ca-bundle\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.239292 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppphp\" (UniqueName: \"kubernetes.io/projected/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-kube-api-access-ppphp\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.239331 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-db-sync-config-data\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.239368 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-config-data\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.341225 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-config-data\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.341304 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-combined-ca-bundle\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.341379 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppphp\" (UniqueName: \"kubernetes.io/projected/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-kube-api-access-ppphp\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.341416 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-db-sync-config-data\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.347042 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-db-sync-config-data\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.347456 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-config-data\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.357908 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-combined-ca-bundle\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.358199 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppphp\" (UniqueName: \"kubernetes.io/projected/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-kube-api-access-ppphp\") pod \"glance-db-sync-psncj\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.428008 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-psncj"
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.798148 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-5j2wv"
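The glance-db-sync-psncj lines above are one full pass of the kubelet volume manager: each volume from the pod's desired state is first checked ("operationExecutor.VerifyControllerAttachedVolume started"), then mounted ("operationExecutor.MountVolume started" followed by "MountVolume.SetUp succeeded"); the root-account-create-update teardown earlier shows the reverse path ("UnmountVolume started", "UnmountVolume.TearDown succeeded", "Volume detached"). A toy Go model of that desired-state/actual-state reconciliation, purely illustrative (the real loop lives in pkg/kubelet/volumemanager/reconciler and is far more involved):

package main

import "fmt"

// Toy model of the reconciliation that produces the reconciler_common.go /
// operation_generator.go lines above: volumes in the desired state but not
// the actual state get verified and mounted; volumes only in the actual
// state get unmounted. Illustrative only, not the upstream API.
func main() {
	desired := map[string]bool{"config-data": true, "combined-ca-bundle": true}
	actual := map[string]bool{"combined-ca-bundle": true, "stale-volume": true}

	for vol := range desired {
		if !actual[vol] {
			fmt.Printf("operationExecutor.VerifyControllerAttachedVolume started for volume %q\n", vol)
			fmt.Printf("operationExecutor.MountVolume started for volume %q\n", vol)
			fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", vol)
		}
	}
	for vol := range actual {
		if !desired[vol] {
			fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", vol)
			fmt.Printf("UnmountVolume.TearDown succeeded for volume %q\n", vol)
		}
	}
}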
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.958984 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-combined-ca-bundle\") pod \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") "
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.959200 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-swiftconf\") pod \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") "
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.959324 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-scripts\") pod \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") "
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.959381 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pn2t7\" (UniqueName: \"kubernetes.io/projected/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-kube-api-access-pn2t7\") pod \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") "
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.959403 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-etc-swift\") pod \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") "
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.959462 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-ring-data-devices\") pod \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") "
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.959514 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-dispersionconf\") pod \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\" (UID: \"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9\") "
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.961592 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" (UID: "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.961849 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" (UID: "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.965136 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-kube-api-access-pn2t7" (OuterVolumeSpecName: "kube-api-access-pn2t7") pod "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" (UID: "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9"). InnerVolumeSpecName "kube-api-access-pn2t7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.968333 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" (UID: "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.986808 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-scripts" (OuterVolumeSpecName: "scripts") pod "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" (UID: "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.989940 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" (UID: "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:54:11 crc kubenswrapper[4861]: I0129 06:54:11.990155 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" (UID: "f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.037008 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-psncj"]
Jan 29 06:54:12 crc kubenswrapper[4861]: W0129 06:54:12.043660 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod505a3759_b9c9_48ed_b63c_cc0b1e253fe5.slice/crio-ad9aaab3c2125df4069b83fa96b23cab3ec420ca5ffa77202202d86781e042a1 WatchSource:0}: Error finding container ad9aaab3c2125df4069b83fa96b23cab3ec420ca5ffa77202202d86781e042a1: Status 404 returned error can't find the container with id ad9aaab3c2125df4069b83fa96b23cab3ec420ca5ffa77202202d86781e042a1
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.061271 4861 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.061309 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.061324 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pn2t7\" (UniqueName: \"kubernetes.io/projected/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-kube-api-access-pn2t7\") on node \"crc\" DevicePath \"\""
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.061340 4861 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.061352 4861 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.061361 4861 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.061371 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.510413 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-5j2wv" event={"ID":"f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9","Type":"ContainerDied","Data":"39678c0b309befc6eaca3a6e21adacef97433087953fc2955cd4481cf1a9ac40"}
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.511286 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39678c0b309befc6eaca3a6e21adacef97433087953fc2955cd4481cf1a9ac40"
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.510494 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-5j2wv"
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.512107 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-psncj" event={"ID":"505a3759-b9c9-48ed-b63c-cc0b1e253fe5","Type":"ContainerStarted","Data":"ad9aaab3c2125df4069b83fa96b23cab3ec420ca5ffa77202202d86781e042a1"}
Jan 29 06:54:12 crc kubenswrapper[4861]: I0129 06:54:12.739591 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0"
Jan 29 06:54:15 crc kubenswrapper[4861]: I0129 06:54:15.966379 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-z5wvn" podUID="5b52afb6-32de-4f14-9663-adeec08b4fad" containerName="ovn-controller" probeResult="failure" output=<
Jan 29 06:54:15 crc kubenswrapper[4861]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 29 06:54:15 crc kubenswrapper[4861]: >
Jan 29 06:54:15 crc kubenswrapper[4861]: I0129 06:54:15.987946 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-6n7w9"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.000858 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-6n7w9"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.235188 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-z5wvn-config-kxxk8"]
Jan 29 06:54:16 crc kubenswrapper[4861]: E0129 06:54:16.235481 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" containerName="swift-ring-rebalance"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.235492 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" containerName="swift-ring-rebalance"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.235662 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" containerName="swift-ring-rebalance"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.237944 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-z5wvn-config-kxxk8"
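The ovn-controller "Probe failed" blocks recur at 06:54:10.96, 06:54:15.97 and again at 06:54:20.98, i.e. on a roughly 5-second period, and the text captured between output=< and > is whatever the probe command printed. That is the behaviour of an exec readiness probe; a sketch of what such a spec could look like with the k8s.io/api types follows. The command path and thresholds are assumptions for illustration only; the actual ovn-controller pod spec is not part of this log.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// Hypothetical exec readiness probe of the shape the prober.go:107 lines
// imply: the command's exit status decides Ready/NotReady and its output is
// logged as the probeResult output. Command and thresholds are assumptions.
func main() {
	probe := corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			Exec: &corev1.ExecAction{
				Command: []string{"/usr/local/bin/ovn_controller_readiness.sh"}, // hypothetical path
			},
		},
		PeriodSeconds:    5, // matches the ~5s spacing of the failures above
		FailureThreshold: 3, // assumption
	}
	fmt.Printf("exec readiness probe %v, every %ds\n", probe.Exec.Command, probe.PeriodSeconds)
}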
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.242528 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.257231 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.278540 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-z5wvn-config-kxxk8"]
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.354666 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.354737 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-scripts\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.354766 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-log-ovn\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.354818 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run-ovn\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.354861 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-additional-scripts\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.354982 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcpgk\" (UniqueName: \"kubernetes.io/projected/00713e50-fa0d-4902-bcdb-285f887fbfd3-kube-api-access-mcpgk\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.456030 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.456182 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-scripts\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.456208 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-log-ovn\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.456259 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run-ovn\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.456298 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-additional-scripts\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.456363 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcpgk\" (UniqueName: \"kubernetes.io/projected/00713e50-fa0d-4902-bcdb-285f887fbfd3-kube-api-access-mcpgk\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.456383 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.456900 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run-ovn\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.456958 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-log-ovn\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.457344 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-additional-scripts\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.459604 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-scripts\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.481777 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcpgk\" (UniqueName: \"kubernetes.io/projected/00713e50-fa0d-4902-bcdb-285f887fbfd3-kube-api-access-mcpgk\") pod \"ovn-controller-z5wvn-config-kxxk8\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:16 crc kubenswrapper[4861]: I0129 06:54:16.579548 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-z5wvn-config-kxxk8"
Jan 29 06:54:17 crc kubenswrapper[4861]: I0129 06:54:17.014100 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-z5wvn-config-kxxk8"]
Jan 29 06:54:17 crc kubenswrapper[4861]: I0129 06:54:17.154265 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.102379 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-ngprs"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.103800 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-ngprs"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.113876 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-ngprs"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.204913 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-r5hz4"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.206136 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-r5hz4"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.218567 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-20e6-account-create-update-g8kvq"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.219588 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-20e6-account-create-update-g8kvq"
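The ovn-controller-z5wvn-config-kxxk8 pod above mounts three different volume source kinds, visible in the UniqueName plugin prefixes: kubernetes.io/host-path (var-run, var-run-ovn, var-log-ovn), kubernetes.io/configmap (scripts, additional-scripts) and kubernetes.io/projected (kube-api-access-mcpgk, the service-account token volume). A sketch of such a volume list with the k8s.io/api types; the host paths and the configmap name are assumptions reconstructed from the volume names, not read from a pod spec:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// The three volume source kinds seen in the mount lines above, expressed
// with the k8s.io/api types. Paths and the configmap name are assumptions.
func main() {
	volumes := []corev1.Volume{
		{Name: "var-run", VolumeSource: corev1.VolumeSource{ // kubernetes.io/host-path
			HostPath: &corev1.HostPathVolumeSource{Path: "/var/run"}, // assumed path
		}},
		{Name: "scripts", VolumeSource: corev1.VolumeSource{ // kubernetes.io/configmap
			ConfigMap: &corev1.ConfigMapVolumeSource{
				LocalObjectReference: corev1.LocalObjectReference{Name: "ovncontroller-scripts"}, // assumed name
			},
		}},
		{Name: "kube-api-access-mcpgk", VolumeSource: corev1.VolumeSource{ // kubernetes.io/projected
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{Path: "token"}},
				},
			},
		}},
	}
	for _, v := range volumes {
		fmt.Println("volume:", v.Name)
	}
}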
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.224545 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.249533 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-r5hz4"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.290173 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34ae275d-679d-40f9-883c-f72b76d821fe-operator-scripts\") pod \"cinder-db-create-ngprs\" (UID: \"34ae275d-679d-40f9-883c-f72b76d821fe\") " pod="openstack/cinder-db-create-ngprs"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.290237 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7kc2\" (UniqueName: \"kubernetes.io/projected/34ae275d-679d-40f9-883c-f72b76d821fe-kube-api-access-k7kc2\") pod \"cinder-db-create-ngprs\" (UID: \"34ae275d-679d-40f9-883c-f72b76d821fe\") " pod="openstack/cinder-db-create-ngprs"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.294708 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-20e6-account-create-update-g8kvq"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.309603 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-3399-account-create-update-sg992"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.310779 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3399-account-create-update-sg992"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.313438 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.322283 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3399-account-create-update-sg992"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.393203 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/037a57cc-64cd-4b10-9c94-c609072db4f3-operator-scripts\") pod \"barbican-20e6-account-create-update-g8kvq\" (UID: \"037a57cc-64cd-4b10-9c94-c609072db4f3\") " pod="openstack/barbican-20e6-account-create-update-g8kvq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.393260 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-operator-scripts\") pod \"barbican-db-create-r5hz4\" (UID: \"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba\") " pod="openstack/barbican-db-create-r5hz4"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.393681 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34ae275d-679d-40f9-883c-f72b76d821fe-operator-scripts\") pod \"cinder-db-create-ngprs\" (UID: \"34ae275d-679d-40f9-883c-f72b76d821fe\") " pod="openstack/cinder-db-create-ngprs"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.393735 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkd75\" (UniqueName: \"kubernetes.io/projected/037a57cc-64cd-4b10-9c94-c609072db4f3-kube-api-access-jkd75\") pod \"barbican-20e6-account-create-update-g8kvq\" (UID: \"037a57cc-64cd-4b10-9c94-c609072db4f3\") " pod="openstack/barbican-20e6-account-create-update-g8kvq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.393892 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7kc2\" (UniqueName: \"kubernetes.io/projected/34ae275d-679d-40f9-883c-f72b76d821fe-kube-api-access-k7kc2\") pod \"cinder-db-create-ngprs\" (UID: \"34ae275d-679d-40f9-883c-f72b76d821fe\") " pod="openstack/cinder-db-create-ngprs"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.393985 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkc4l\" (UniqueName: \"kubernetes.io/projected/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-kube-api-access-xkc4l\") pod \"barbican-db-create-r5hz4\" (UID: \"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba\") " pod="openstack/barbican-db-create-r5hz4"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.394187 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34ae275d-679d-40f9-883c-f72b76d821fe-operator-scripts\") pod \"cinder-db-create-ngprs\" (UID: \"34ae275d-679d-40f9-883c-f72b76d821fe\") " pod="openstack/cinder-db-create-ngprs"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.413941 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-256m5"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.415154 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-256m5"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.419868 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-256m5"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.444276 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7kc2\" (UniqueName: \"kubernetes.io/projected/34ae275d-679d-40f9-883c-f72b76d821fe-kube-api-access-k7kc2\") pod \"cinder-db-create-ngprs\" (UID: \"34ae275d-679d-40f9-883c-f72b76d821fe\") " pod="openstack/cinder-db-create-ngprs"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.466359 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-qpnsr"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.467424 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qpnsr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.476795 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-qpnsr"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.477191 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.477353 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xp8pr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.477465 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.477569 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.502171 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-operator-scripts\") pod \"cinder-3399-account-create-update-sg992\" (UID: \"7bc1bcf4-064f-4460-8d8f-5e619d79dbba\") " pod="openstack/cinder-3399-account-create-update-sg992"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.502249 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkc4l\" (UniqueName: \"kubernetes.io/projected/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-kube-api-access-xkc4l\") pod \"barbican-db-create-r5hz4\" (UID: \"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba\") " pod="openstack/barbican-db-create-r5hz4"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.502288 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/037a57cc-64cd-4b10-9c94-c609072db4f3-operator-scripts\") pod \"barbican-20e6-account-create-update-g8kvq\" (UID: \"037a57cc-64cd-4b10-9c94-c609072db4f3\") " pod="openstack/barbican-20e6-account-create-update-g8kvq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.502317 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prdfp\" (UniqueName: \"kubernetes.io/projected/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-kube-api-access-prdfp\") pod \"cinder-3399-account-create-update-sg992\" (UID: \"7bc1bcf4-064f-4460-8d8f-5e619d79dbba\") " pod="openstack/cinder-3399-account-create-update-sg992"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.502341 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-operator-scripts\") pod \"barbican-db-create-r5hz4\" (UID: \"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba\") " pod="openstack/barbican-db-create-r5hz4"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.502383 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkd75\" (UniqueName: \"kubernetes.io/projected/037a57cc-64cd-4b10-9c94-c609072db4f3-kube-api-access-jkd75\") pod \"barbican-20e6-account-create-update-g8kvq\" (UID: \"037a57cc-64cd-4b10-9c94-c609072db4f3\") " pod="openstack/barbican-20e6-account-create-update-g8kvq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.503451 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/037a57cc-64cd-4b10-9c94-c609072db4f3-operator-scripts\") pod \"barbican-20e6-account-create-update-g8kvq\" (UID: \"037a57cc-64cd-4b10-9c94-c609072db4f3\") " pod="openstack/barbican-20e6-account-create-update-g8kvq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.504110 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-operator-scripts\") pod \"barbican-db-create-r5hz4\" (UID: \"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba\") " pod="openstack/barbican-db-create-r5hz4"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.535758 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkd75\" (UniqueName: \"kubernetes.io/projected/037a57cc-64cd-4b10-9c94-c609072db4f3-kube-api-access-jkd75\") pod \"barbican-20e6-account-create-update-g8kvq\" (UID: \"037a57cc-64cd-4b10-9c94-c609072db4f3\") " pod="openstack/barbican-20e6-account-create-update-g8kvq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.543273 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkc4l\" (UniqueName: \"kubernetes.io/projected/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-kube-api-access-xkc4l\") pod \"barbican-db-create-r5hz4\" (UID: \"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba\") " pod="openstack/barbican-db-create-r5hz4"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.551351 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-20e6-account-create-update-g8kvq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.578667 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-dfed-account-create-update-hvzjq"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.580092 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dfed-account-create-update-hvzjq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.582593 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.594235 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dfed-account-create-update-hvzjq"]
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.603539 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prdfp\" (UniqueName: \"kubernetes.io/projected/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-kube-api-access-prdfp\") pod \"cinder-3399-account-create-update-sg992\" (UID: \"7bc1bcf4-064f-4460-8d8f-5e619d79dbba\") " pod="openstack/cinder-3399-account-create-update-sg992"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.603585 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-config-data\") pod \"keystone-db-sync-qpnsr\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " pod="openstack/keystone-db-sync-qpnsr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.603700 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qf85x\" (UniqueName: \"kubernetes.io/projected/d4785f86-012c-4e4f-98aa-0e4334a923bc-kube-api-access-qf85x\") pod \"neutron-db-create-256m5\" (UID: \"d4785f86-012c-4e4f-98aa-0e4334a923bc\") " pod="openstack/neutron-db-create-256m5"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.603827 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-operator-scripts\") pod \"cinder-3399-account-create-update-sg992\" (UID: \"7bc1bcf4-064f-4460-8d8f-5e619d79dbba\") " pod="openstack/cinder-3399-account-create-update-sg992"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.603864 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-combined-ca-bundle\") pod \"keystone-db-sync-qpnsr\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " pod="openstack/keystone-db-sync-qpnsr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.603906 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4785f86-012c-4e4f-98aa-0e4334a923bc-operator-scripts\") pod \"neutron-db-create-256m5\" (UID: \"d4785f86-012c-4e4f-98aa-0e4334a923bc\") " pod="openstack/neutron-db-create-256m5"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.603935 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-698gs\" (UniqueName: \"kubernetes.io/projected/d8180980-2786-4514-a34e-4d68009ea724-kube-api-access-698gs\") pod \"keystone-db-sync-qpnsr\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " pod="openstack/keystone-db-sync-qpnsr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.605724 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-operator-scripts\") pod \"cinder-3399-account-create-update-sg992\" (UID: \"7bc1bcf4-064f-4460-8d8f-5e619d79dbba\") " pod="openstack/cinder-3399-account-create-update-sg992"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.634729 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prdfp\" (UniqueName: \"kubernetes.io/projected/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-kube-api-access-prdfp\") pod \"cinder-3399-account-create-update-sg992\" (UID: \"7bc1bcf4-064f-4460-8d8f-5e619d79dbba\") " pod="openstack/cinder-3399-account-create-update-sg992"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.705531 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stnsz\" (UniqueName: \"kubernetes.io/projected/f1c26938-2f5c-448b-8590-16ce29878d3b-kube-api-access-stnsz\") pod \"neutron-dfed-account-create-update-hvzjq\" (UID: \"f1c26938-2f5c-448b-8590-16ce29878d3b\") " pod="openstack/neutron-dfed-account-create-update-hvzjq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.705967 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-config-data\") pod \"keystone-db-sync-qpnsr\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " pod="openstack/keystone-db-sync-qpnsr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.706033 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1c26938-2f5c-448b-8590-16ce29878d3b-operator-scripts\") pod \"neutron-dfed-account-create-update-hvzjq\" (UID: \"f1c26938-2f5c-448b-8590-16ce29878d3b\") " pod="openstack/neutron-dfed-account-create-update-hvzjq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.706062 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qf85x\" (UniqueName: \"kubernetes.io/projected/d4785f86-012c-4e4f-98aa-0e4334a923bc-kube-api-access-qf85x\") pod \"neutron-db-create-256m5\" (UID: \"d4785f86-012c-4e4f-98aa-0e4334a923bc\") " pod="openstack/neutron-db-create-256m5"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.706137 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-combined-ca-bundle\") pod \"keystone-db-sync-qpnsr\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " pod="openstack/keystone-db-sync-qpnsr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.706162 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4785f86-012c-4e4f-98aa-0e4334a923bc-operator-scripts\") pod \"neutron-db-create-256m5\" (UID: \"d4785f86-012c-4e4f-98aa-0e4334a923bc\") " pod="openstack/neutron-db-create-256m5"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.706188 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-698gs\" (UniqueName: \"kubernetes.io/projected/d8180980-2786-4514-a34e-4d68009ea724-kube-api-access-698gs\") pod \"keystone-db-sync-qpnsr\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " pod="openstack/keystone-db-sync-qpnsr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.706779 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4785f86-012c-4e4f-98aa-0e4334a923bc-operator-scripts\") pod \"neutron-db-create-256m5\" (UID: \"d4785f86-012c-4e4f-98aa-0e4334a923bc\") " pod="openstack/neutron-db-create-256m5"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.709612 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-config-data\") pod \"keystone-db-sync-qpnsr\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " pod="openstack/keystone-db-sync-qpnsr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.712298 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-combined-ca-bundle\") pod \"keystone-db-sync-qpnsr\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " pod="openstack/keystone-db-sync-qpnsr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.723989 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-698gs\" (UniqueName: \"kubernetes.io/projected/d8180980-2786-4514-a34e-4d68009ea724-kube-api-access-698gs\") pod \"keystone-db-sync-qpnsr\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " pod="openstack/keystone-db-sync-qpnsr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.724210 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-ngprs"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.729344 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qf85x\" (UniqueName: \"kubernetes.io/projected/d4785f86-012c-4e4f-98aa-0e4334a923bc-kube-api-access-qf85x\") pod \"neutron-db-create-256m5\" (UID: \"d4785f86-012c-4e4f-98aa-0e4334a923bc\") " pod="openstack/neutron-db-create-256m5"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.734676 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-256m5"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.807118 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1c26938-2f5c-448b-8590-16ce29878d3b-operator-scripts\") pod \"neutron-dfed-account-create-update-hvzjq\" (UID: \"f1c26938-2f5c-448b-8590-16ce29878d3b\") " pod="openstack/neutron-dfed-account-create-update-hvzjq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.807227 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stnsz\" (UniqueName: \"kubernetes.io/projected/f1c26938-2f5c-448b-8590-16ce29878d3b-kube-api-access-stnsz\") pod \"neutron-dfed-account-create-update-hvzjq\" (UID: \"f1c26938-2f5c-448b-8590-16ce29878d3b\") " pod="openstack/neutron-dfed-account-create-update-hvzjq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.807492 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qpnsr"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.807891 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1c26938-2f5c-448b-8590-16ce29878d3b-operator-scripts\") pod \"neutron-dfed-account-create-update-hvzjq\" (UID: \"f1c26938-2f5c-448b-8590-16ce29878d3b\") " pod="openstack/neutron-dfed-account-create-update-hvzjq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.818480 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-r5hz4"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.825654 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stnsz\" (UniqueName: \"kubernetes.io/projected/f1c26938-2f5c-448b-8590-16ce29878d3b-kube-api-access-stnsz\") pod \"neutron-dfed-account-create-update-hvzjq\" (UID: \"f1c26938-2f5c-448b-8590-16ce29878d3b\") " pod="openstack/neutron-dfed-account-create-update-hvzjq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.908531 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dfed-account-create-update-hvzjq"
Jan 29 06:54:18 crc kubenswrapper[4861]: I0129 06:54:18.930710 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3399-account-create-update-sg992"
Jan 29 06:54:20 crc kubenswrapper[4861]: I0129 06:54:20.983058 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-z5wvn" podUID="5b52afb6-32de-4f14-9663-adeec08b4fad" containerName="ovn-controller" probeResult="failure" output=<
Jan 29 06:54:20 crc kubenswrapper[4861]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 29 06:54:20 crc kubenswrapper[4861]: >
Jan 29 06:54:24 crc kubenswrapper[4861]: I0129 06:54:24.638708 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-z5wvn-config-kxxk8" event={"ID":"00713e50-fa0d-4902-bcdb-285f887fbfd3","Type":"ContainerStarted","Data":"303162db4211a7ec642dd35f591650c23ae80f123779134d9db286f4d7adac52"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.042293 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-20e6-account-create-update-g8kvq"]
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.046437 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-256m5"]
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.099643 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dfed-account-create-update-hvzjq"]
Jan 29 06:54:25 crc kubenswrapper[4861]: W0129 06:54:25.114448 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8180980_2786_4514_a34e_4d68009ea724.slice/crio-d8cf16feba3abbec18af8ffb7f360cc3ad7b4b375518d050f9d68161176a1e4a WatchSource:0}: Error finding container d8cf16feba3abbec18af8ffb7f360cc3ad7b4b375518d050f9d68161176a1e4a: Status 404 returned error can't find the container with id d8cf16feba3abbec18af8ffb7f360cc3ad7b4b375518d050f9d68161176a1e4a
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.132981 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-qpnsr"]
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.193040 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3399-account-create-update-sg992"]
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.221110 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-ngprs"]
Jan 29 06:54:25 crc kubenswrapper[4861]: W0129 06:54:25.223628 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1ee1ab9_e4b7_47d9_ad86_c05c7c1494ba.slice/crio-3cdf3ace4938265f11620e79277e4e607c1d6752ef476a23b4d1a6a709286af5 WatchSource:0}: Error finding container 3cdf3ace4938265f11620e79277e4e607c1d6752ef476a23b4d1a6a709286af5: Status 404 returned error can't find the container with id 3cdf3ace4938265f11620e79277e4e607c1d6752ef476a23b4d1a6a709286af5
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.229914 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-r5hz4"]
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.553363 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0"
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.559694 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift\") pod \"swift-storage-0\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") " pod="openstack/swift-storage-0"
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.646868 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qpnsr" event={"ID":"d8180980-2786-4514-a34e-4d68009ea724","Type":"ContainerStarted","Data":"d8cf16feba3abbec18af8ffb7f360cc3ad7b4b375518d050f9d68161176a1e4a"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.648263 4861 generic.go:334] "Generic (PLEG): container finished" podID="00713e50-fa0d-4902-bcdb-285f887fbfd3" containerID="76374b1f353d893fdc3069b1615d67deeb913b0c2768471ba2baee2208611a03" exitCode=0
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.648331 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-z5wvn-config-kxxk8" event={"ID":"00713e50-fa0d-4902-bcdb-285f887fbfd3","Type":"ContainerDied","Data":"76374b1f353d893fdc3069b1615d67deeb913b0c2768471ba2baee2208611a03"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.650638 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-20e6-account-create-update-g8kvq" event={"ID":"037a57cc-64cd-4b10-9c94-c609072db4f3","Type":"ContainerStarted","Data":"4d253aba4ac2e77f20e972fc48cc6c3ae8fe5652146893e2a09943dcbeb6f768"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.650741 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-20e6-account-create-update-g8kvq" event={"ID":"037a57cc-64cd-4b10-9c94-c609072db4f3","Type":"ContainerStarted","Data":"32fbdd062d0ff8f65679a5ed32a7cc0ec359b531fa003c7ee781de1939d6b1c0"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.652482 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3399-account-create-update-sg992" event={"ID":"7bc1bcf4-064f-4460-8d8f-5e619d79dbba","Type":"ContainerStarted","Data":"a3ba03aed1e2eed08ca2c4db3d49eda7c8351432864bca739efc9fef03b3696d"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.652574 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3399-account-create-update-sg992" event={"ID":"7bc1bcf4-064f-4460-8d8f-5e619d79dbba","Type":"ContainerStarted","Data":"83703ccf44a9be0dcdd7d616d4b583b33cd8a92c8f6ee7d60ef7a94967e8c1f5"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.655771 4861 generic.go:334] "Generic (PLEG): container finished" podID="d4785f86-012c-4e4f-98aa-0e4334a923bc" containerID="03fe6aeb6f9d7cf900046e36d87fe35f4d2343ed4395de8376bddce3ef4f9aad" exitCode=0
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.655938 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-256m5" event={"ID":"d4785f86-012c-4e4f-98aa-0e4334a923bc","Type":"ContainerDied","Data":"03fe6aeb6f9d7cf900046e36d87fe35f4d2343ed4395de8376bddce3ef4f9aad"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.656001 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-256m5" event={"ID":"d4785f86-012c-4e4f-98aa-0e4334a923bc","Type":"ContainerStarted","Data":"b000116ed4a25d65019925f78442ebbc1d73322e74bcfe60088bb02421c2a0be"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.657123 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-ngprs" event={"ID":"34ae275d-679d-40f9-883c-f72b76d821fe","Type":"ContainerStarted","Data":"f5ef15ca4fe37880a59d8aaf819b8144f64d194a16bcac94f22c457fa5014ff8"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.657242 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-ngprs" event={"ID":"34ae275d-679d-40f9-883c-f72b76d821fe","Type":"ContainerStarted","Data":"ec9eff3fdc5fa07007f4a617fc7307da285a8e5971b3b0280b78d8e42e0c75ea"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.658736 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dfed-account-create-update-hvzjq" event={"ID":"f1c26938-2f5c-448b-8590-16ce29878d3b","Type":"ContainerStarted","Data":"c370cf25d946e29524a08b09bbd2c770d4dc22a304494f564e23437ef233b062"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.658859 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dfed-account-create-update-hvzjq" event={"ID":"f1c26938-2f5c-448b-8590-16ce29878d3b","Type":"ContainerStarted","Data":"eed0606fbd9d2d9f38b5b15398def384e2aeb3366ff986bbfd64f3200bc918c3"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.660280 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-r5hz4" event={"ID":"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba","Type":"ContainerStarted","Data":"3412f37a50f007d599dc119840663f4df1024fd44f7e0620807159235732c151"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.660364 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-r5hz4" event={"ID":"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba","Type":"ContainerStarted","Data":"3cdf3ace4938265f11620e79277e4e607c1d6752ef476a23b4d1a6a709286af5"}
Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.661885 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-psncj" event={"ID":"505a3759-b9c9-48ed-b63c-cc0b1e253fe5","Type":"ContainerStarted","Data":"e80f9259bf11bb94852f82dccdace8a12c0dd218cca38f8e88d6fe6d72cabee2"}
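Every "SyncLoop (PLEG): event for pod" line above is the kubelet sync loop consuming a pod lifecycle event that the PLEG produced by relisting the container runtime; the generic.go:334 "container finished" lines are the relist detecting the state change that the event then reports. The logged event={ID,Type,Data} payload mirrors the upstream event type, paraphrased below from pkg/kubelet/pleg in kubernetes/kubernetes (field details are an approximation; upstream uses types.UID for ID):

package main

import "fmt"

// Paraphrase of the PLEG event behind the "SyncLoop (PLEG)" lines above.
// Field names match the logged JSON keys ID/Type/Data; Data is typically
// the container or sandbox ID the event refers to.
type PodLifeCycleEventType string

const (
	ContainerStarted PodLifeCycleEventType = "ContainerStarted"
	ContainerDied    PodLifeCycleEventType = "ContainerDied"
	ContainerRemoved PodLifeCycleEventType = "ContainerRemoved"
)

type PodLifecycleEvent struct {
	ID   string                // pod UID (types.UID upstream)
	Type PodLifeCycleEventType // what happened
	Data interface{}           // usually a container ID string
}

func main() {
	ev := PodLifecycleEvent{
		ID:   "b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba",
		Type: ContainerStarted,
		Data: "3412f37a50f007d599dc119840663f4df1024fd44f7e0620807159235732c151",
	}
	fmt.Printf("SyncLoop (PLEG): event for pod event=%+v\n", ev)
}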
podStartSLOduration=7.696854037 podStartE2EDuration="7.696854037s" podCreationTimestamp="2026-01-29 06:54:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:25.691665584 +0000 UTC m=+1157.363160141" watchObservedRunningTime="2026-01-29 06:54:25.696854037 +0000 UTC m=+1157.368348594" Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.744736 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-20e6-account-create-update-g8kvq" podStartSLOduration=7.744720009 podStartE2EDuration="7.744720009s" podCreationTimestamp="2026-01-29 06:54:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:25.721374223 +0000 UTC m=+1157.392868780" watchObservedRunningTime="2026-01-29 06:54:25.744720009 +0000 UTC m=+1157.416214566" Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.764220 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-r5hz4" podStartSLOduration=7.764200377 podStartE2EDuration="7.764200377s" podCreationTimestamp="2026-01-29 06:54:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:25.761110848 +0000 UTC m=+1157.432605405" watchObservedRunningTime="2026-01-29 06:54:25.764200377 +0000 UTC m=+1157.435694954" Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.765680 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-dfed-account-create-update-hvzjq" podStartSLOduration=7.765674445 podStartE2EDuration="7.765674445s" podCreationTimestamp="2026-01-29 06:54:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:25.74515237 +0000 UTC m=+1157.416646927" watchObservedRunningTime="2026-01-29 06:54:25.765674445 +0000 UTC m=+1157.437169002" Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.781773 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-ngprs" podStartSLOduration=7.781758935 podStartE2EDuration="7.781758935s" podCreationTimestamp="2026-01-29 06:54:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:25.778168174 +0000 UTC m=+1157.449662731" watchObservedRunningTime="2026-01-29 06:54:25.781758935 +0000 UTC m=+1157.453253492" Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.798539 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.813365 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-psncj" podStartSLOduration=2.259157477 podStartE2EDuration="14.813349332s" podCreationTimestamp="2026-01-29 06:54:11 +0000 UTC" firstStartedPulling="2026-01-29 06:54:12.046181633 +0000 UTC m=+1143.717676180" lastFinishedPulling="2026-01-29 06:54:24.600373478 +0000 UTC m=+1156.271868035" observedRunningTime="2026-01-29 06:54:25.811424073 +0000 UTC m=+1157.482918640" watchObservedRunningTime="2026-01-29 06:54:25.813349332 +0000 UTC m=+1157.484843889" Jan 29 06:54:25 crc kubenswrapper[4861]: I0129 06:54:25.974499 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-z5wvn" Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.325296 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 29 06:54:26 crc kubenswrapper[4861]: W0129 06:54:26.336910 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d54030c_d725_4a6c_ad29_d84482378f20.slice/crio-62e165a6b3e7935aaad8da87fafaf356e2b0cb08e9376558b4168b295cdf13c7 WatchSource:0}: Error finding container 62e165a6b3e7935aaad8da87fafaf356e2b0cb08e9376558b4168b295cdf13c7: Status 404 returned error can't find the container with id 62e165a6b3e7935aaad8da87fafaf356e2b0cb08e9376558b4168b295cdf13c7 Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.673021 4861 generic.go:334] "Generic (PLEG): container finished" podID="b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba" containerID="3412f37a50f007d599dc119840663f4df1024fd44f7e0620807159235732c151" exitCode=0 Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.673084 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-r5hz4" event={"ID":"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba","Type":"ContainerDied","Data":"3412f37a50f007d599dc119840663f4df1024fd44f7e0620807159235732c151"} Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.674683 4861 generic.go:334] "Generic (PLEG): container finished" podID="037a57cc-64cd-4b10-9c94-c609072db4f3" containerID="4d253aba4ac2e77f20e972fc48cc6c3ae8fe5652146893e2a09943dcbeb6f768" exitCode=0 Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.674760 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-20e6-account-create-update-g8kvq" event={"ID":"037a57cc-64cd-4b10-9c94-c609072db4f3","Type":"ContainerDied","Data":"4d253aba4ac2e77f20e972fc48cc6c3ae8fe5652146893e2a09943dcbeb6f768"} Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.675994 4861 generic.go:334] "Generic (PLEG): container finished" podID="7bc1bcf4-064f-4460-8d8f-5e619d79dbba" containerID="a3ba03aed1e2eed08ca2c4db3d49eda7c8351432864bca739efc9fef03b3696d" exitCode=0 Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.676030 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3399-account-create-update-sg992" event={"ID":"7bc1bcf4-064f-4460-8d8f-5e619d79dbba","Type":"ContainerDied","Data":"a3ba03aed1e2eed08ca2c4db3d49eda7c8351432864bca739efc9fef03b3696d"} Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.676905 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"62e165a6b3e7935aaad8da87fafaf356e2b0cb08e9376558b4168b295cdf13c7"} Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.678576 4861 generic.go:334] "Generic (PLEG): container finished" podID="34ae275d-679d-40f9-883c-f72b76d821fe" containerID="f5ef15ca4fe37880a59d8aaf819b8144f64d194a16bcac94f22c457fa5014ff8" exitCode=0 Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.678638 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-ngprs" event={"ID":"34ae275d-679d-40f9-883c-f72b76d821fe","Type":"ContainerDied","Data":"f5ef15ca4fe37880a59d8aaf819b8144f64d194a16bcac94f22c457fa5014ff8"} Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.679859 4861 generic.go:334] "Generic (PLEG): container finished" podID="f1c26938-2f5c-448b-8590-16ce29878d3b" containerID="c370cf25d946e29524a08b09bbd2c770d4dc22a304494f564e23437ef233b062" exitCode=0 Jan 29 06:54:26 crc kubenswrapper[4861]: I0129 06:54:26.680028 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dfed-account-create-update-hvzjq" event={"ID":"f1c26938-2f5c-448b-8590-16ce29878d3b","Type":"ContainerDied","Data":"c370cf25d946e29524a08b09bbd2c770d4dc22a304494f564e23437ef233b062"} Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.094720 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-z5wvn-config-kxxk8" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.096317 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-256m5" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.284725 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-scripts\") pod \"00713e50-fa0d-4902-bcdb-285f887fbfd3\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.285142 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run\") pod \"00713e50-fa0d-4902-bcdb-285f887fbfd3\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.285212 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run-ovn\") pod \"00713e50-fa0d-4902-bcdb-285f887fbfd3\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.285256 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-log-ovn\") pod \"00713e50-fa0d-4902-bcdb-285f887fbfd3\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.285320 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-additional-scripts\") pod \"00713e50-fa0d-4902-bcdb-285f887fbfd3\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.285347 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"kube-api-access-qf85x\" (UniqueName: \"kubernetes.io/projected/d4785f86-012c-4e4f-98aa-0e4334a923bc-kube-api-access-qf85x\") pod \"d4785f86-012c-4e4f-98aa-0e4334a923bc\" (UID: \"d4785f86-012c-4e4f-98aa-0e4334a923bc\") " Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.285464 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcpgk\" (UniqueName: \"kubernetes.io/projected/00713e50-fa0d-4902-bcdb-285f887fbfd3-kube-api-access-mcpgk\") pod \"00713e50-fa0d-4902-bcdb-285f887fbfd3\" (UID: \"00713e50-fa0d-4902-bcdb-285f887fbfd3\") " Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.285510 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4785f86-012c-4e4f-98aa-0e4334a923bc-operator-scripts\") pod \"d4785f86-012c-4e4f-98aa-0e4334a923bc\" (UID: \"d4785f86-012c-4e4f-98aa-0e4334a923bc\") " Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.285703 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "00713e50-fa0d-4902-bcdb-285f887fbfd3" (UID: "00713e50-fa0d-4902-bcdb-285f887fbfd3"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.285816 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run" (OuterVolumeSpecName: "var-run") pod "00713e50-fa0d-4902-bcdb-285f887fbfd3" (UID: "00713e50-fa0d-4902-bcdb-285f887fbfd3"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.285866 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "00713e50-fa0d-4902-bcdb-285f887fbfd3" (UID: "00713e50-fa0d-4902-bcdb-285f887fbfd3"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.285990 4861 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.286013 4861 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.286026 4861 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/00713e50-fa0d-4902-bcdb-285f887fbfd3-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.286670 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4785f86-012c-4e4f-98aa-0e4334a923bc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d4785f86-012c-4e4f-98aa-0e4334a923bc" (UID: "d4785f86-012c-4e4f-98aa-0e4334a923bc"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.287003 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-scripts" (OuterVolumeSpecName: "scripts") pod "00713e50-fa0d-4902-bcdb-285f887fbfd3" (UID: "00713e50-fa0d-4902-bcdb-285f887fbfd3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.287960 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "00713e50-fa0d-4902-bcdb-285f887fbfd3" (UID: "00713e50-fa0d-4902-bcdb-285f887fbfd3"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.293871 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00713e50-fa0d-4902-bcdb-285f887fbfd3-kube-api-access-mcpgk" (OuterVolumeSpecName: "kube-api-access-mcpgk") pod "00713e50-fa0d-4902-bcdb-285f887fbfd3" (UID: "00713e50-fa0d-4902-bcdb-285f887fbfd3"). InnerVolumeSpecName "kube-api-access-mcpgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.295677 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4785f86-012c-4e4f-98aa-0e4334a923bc-kube-api-access-qf85x" (OuterVolumeSpecName: "kube-api-access-qf85x") pod "d4785f86-012c-4e4f-98aa-0e4334a923bc" (UID: "d4785f86-012c-4e4f-98aa-0e4334a923bc"). InnerVolumeSpecName "kube-api-access-qf85x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.388015 4861 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.388052 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qf85x\" (UniqueName: \"kubernetes.io/projected/d4785f86-012c-4e4f-98aa-0e4334a923bc-kube-api-access-qf85x\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.388063 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcpgk\" (UniqueName: \"kubernetes.io/projected/00713e50-fa0d-4902-bcdb-285f887fbfd3-kube-api-access-mcpgk\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.388144 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4785f86-012c-4e4f-98aa-0e4334a923bc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.388158 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/00713e50-fa0d-4902-bcdb-285f887fbfd3-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.697475 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-z5wvn-config-kxxk8" event={"ID":"00713e50-fa0d-4902-bcdb-285f887fbfd3","Type":"ContainerDied","Data":"303162db4211a7ec642dd35f591650c23ae80f123779134d9db286f4d7adac52"} Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.697525 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="303162db4211a7ec642dd35f591650c23ae80f123779134d9db286f4d7adac52" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.697615 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-z5wvn-config-kxxk8" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.704945 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-256m5" event={"ID":"d4785f86-012c-4e4f-98aa-0e4334a923bc","Type":"ContainerDied","Data":"b000116ed4a25d65019925f78442ebbc1d73322e74bcfe60088bb02421c2a0be"} Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.705022 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b000116ed4a25d65019925f78442ebbc1d73322e74bcfe60088bb02421c2a0be" Jan 29 06:54:27 crc kubenswrapper[4861]: I0129 06:54:27.705130 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-256m5" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.201296 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-z5wvn-config-kxxk8"] Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.209810 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-z5wvn-config-kxxk8"] Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.270326 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-z5wvn-config-qjf5c"] Jan 29 06:54:28 crc kubenswrapper[4861]: E0129 06:54:28.270818 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00713e50-fa0d-4902-bcdb-285f887fbfd3" containerName="ovn-config" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.270881 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="00713e50-fa0d-4902-bcdb-285f887fbfd3" containerName="ovn-config" Jan 29 06:54:28 crc kubenswrapper[4861]: E0129 06:54:28.270944 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4785f86-012c-4e4f-98aa-0e4334a923bc" containerName="mariadb-database-create" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.271030 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4785f86-012c-4e4f-98aa-0e4334a923bc" containerName="mariadb-database-create" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.271249 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="00713e50-fa0d-4902-bcdb-285f887fbfd3" containerName="ovn-config" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.271317 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4785f86-012c-4e4f-98aa-0e4334a923bc" containerName="mariadb-database-create" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.271816 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.275172 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.283737 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-z5wvn-config-qjf5c"] Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.412144 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.412265 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-scripts\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.412394 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6d97d\" (UniqueName: \"kubernetes.io/projected/c1c0fb54-9086-4393-866d-ad41d8729302-kube-api-access-6d97d\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.412441 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run-ovn\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.412492 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-additional-scripts\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.412626 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-log-ovn\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.514307 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-additional-scripts\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.514418 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-log-ovn\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.514468 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.514520 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-scripts\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.514582 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6d97d\" (UniqueName: \"kubernetes.io/projected/c1c0fb54-9086-4393-866d-ad41d8729302-kube-api-access-6d97d\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.514611 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run-ovn\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.514929 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run-ovn\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.514928 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-log-ovn\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.514994 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.515674 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-additional-scripts\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.517414 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-scripts\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.535770 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6d97d\" (UniqueName: \"kubernetes.io/projected/c1c0fb54-9086-4393-866d-ad41d8729302-kube-api-access-6d97d\") pod \"ovn-controller-z5wvn-config-qjf5c\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:28 crc kubenswrapper[4861]: I0129 06:54:28.593765 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:29 crc kubenswrapper[4861]: I0129 06:54:29.143725 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00713e50-fa0d-4902-bcdb-285f887fbfd3" path="/var/lib/kubelet/pods/00713e50-fa0d-4902-bcdb-285f887fbfd3/volumes" Jan 29 06:54:30 crc kubenswrapper[4861]: I0129 06:54:30.735864 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3399-account-create-update-sg992" event={"ID":"7bc1bcf4-064f-4460-8d8f-5e619d79dbba","Type":"ContainerDied","Data":"83703ccf44a9be0dcdd7d616d4b583b33cd8a92c8f6ee7d60ef7a94967e8c1f5"} Jan 29 06:54:30 crc kubenswrapper[4861]: I0129 06:54:30.737151 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83703ccf44a9be0dcdd7d616d4b583b33cd8a92c8f6ee7d60ef7a94967e8c1f5" Jan 29 06:54:30 crc kubenswrapper[4861]: I0129 06:54:30.739984 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-ngprs" event={"ID":"34ae275d-679d-40f9-883c-f72b76d821fe","Type":"ContainerDied","Data":"ec9eff3fdc5fa07007f4a617fc7307da285a8e5971b3b0280b78d8e42e0c75ea"} Jan 29 06:54:30 crc kubenswrapper[4861]: I0129 06:54:30.740397 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec9eff3fdc5fa07007f4a617fc7307da285a8e5971b3b0280b78d8e42e0c75ea" Jan 29 06:54:30 crc kubenswrapper[4861]: I0129 06:54:30.743784 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dfed-account-create-update-hvzjq" event={"ID":"f1c26938-2f5c-448b-8590-16ce29878d3b","Type":"ContainerDied","Data":"eed0606fbd9d2d9f38b5b15398def384e2aeb3366ff986bbfd64f3200bc918c3"} Jan 29 06:54:30 crc kubenswrapper[4861]: I0129 06:54:30.743990 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eed0606fbd9d2d9f38b5b15398def384e2aeb3366ff986bbfd64f3200bc918c3" Jan 29 06:54:30 crc kubenswrapper[4861]: I0129 06:54:30.746304 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-r5hz4" event={"ID":"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba","Type":"ContainerDied","Data":"3cdf3ace4938265f11620e79277e4e607c1d6752ef476a23b4d1a6a709286af5"} Jan 29 06:54:30 crc kubenswrapper[4861]: I0129 06:54:30.746473 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3cdf3ace4938265f11620e79277e4e607c1d6752ef476a23b4d1a6a709286af5" Jan 29 06:54:30 crc kubenswrapper[4861]: I0129 06:54:30.747452 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-20e6-account-create-update-g8kvq" event={"ID":"037a57cc-64cd-4b10-9c94-c609072db4f3","Type":"ContainerDied","Data":"32fbdd062d0ff8f65679a5ed32a7cc0ec359b531fa003c7ee781de1939d6b1c0"} Jan 29 06:54:30 crc 
kubenswrapper[4861]: I0129 06:54:30.747481 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="32fbdd062d0ff8f65679a5ed32a7cc0ec359b531fa003c7ee781de1939d6b1c0" Jan 29 06:54:30 crc kubenswrapper[4861]: I0129 06:54:30.940583 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-20e6-account-create-update-g8kvq" Jan 29 06:54:30 crc kubenswrapper[4861]: I0129 06:54:30.981896 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dfed-account-create-update-hvzjq" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.009221 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3399-account-create-update-sg992" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.020386 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-ngprs" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.029324 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-r5hz4" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.094441 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/037a57cc-64cd-4b10-9c94-c609072db4f3-operator-scripts\") pod \"037a57cc-64cd-4b10-9c94-c609072db4f3\" (UID: \"037a57cc-64cd-4b10-9c94-c609072db4f3\") " Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.094481 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1c26938-2f5c-448b-8590-16ce29878d3b-operator-scripts\") pod \"f1c26938-2f5c-448b-8590-16ce29878d3b\" (UID: \"f1c26938-2f5c-448b-8590-16ce29878d3b\") " Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.094559 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-operator-scripts\") pod \"7bc1bcf4-064f-4460-8d8f-5e619d79dbba\" (UID: \"7bc1bcf4-064f-4460-8d8f-5e619d79dbba\") " Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.094578 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-prdfp\" (UniqueName: \"kubernetes.io/projected/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-kube-api-access-prdfp\") pod \"7bc1bcf4-064f-4460-8d8f-5e619d79dbba\" (UID: \"7bc1bcf4-064f-4460-8d8f-5e619d79dbba\") " Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.094660 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34ae275d-679d-40f9-883c-f72b76d821fe-operator-scripts\") pod \"34ae275d-679d-40f9-883c-f72b76d821fe\" (UID: \"34ae275d-679d-40f9-883c-f72b76d821fe\") " Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.094711 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkc4l\" (UniqueName: \"kubernetes.io/projected/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-kube-api-access-xkc4l\") pod \"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba\" (UID: \"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba\") " Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.094730 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7kc2\" (UniqueName: 
\"kubernetes.io/projected/34ae275d-679d-40f9-883c-f72b76d821fe-kube-api-access-k7kc2\") pod \"34ae275d-679d-40f9-883c-f72b76d821fe\" (UID: \"34ae275d-679d-40f9-883c-f72b76d821fe\") " Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.094752 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-operator-scripts\") pod \"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba\" (UID: \"b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba\") " Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.094784 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkd75\" (UniqueName: \"kubernetes.io/projected/037a57cc-64cd-4b10-9c94-c609072db4f3-kube-api-access-jkd75\") pod \"037a57cc-64cd-4b10-9c94-c609072db4f3\" (UID: \"037a57cc-64cd-4b10-9c94-c609072db4f3\") " Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.094823 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stnsz\" (UniqueName: \"kubernetes.io/projected/f1c26938-2f5c-448b-8590-16ce29878d3b-kube-api-access-stnsz\") pod \"f1c26938-2f5c-448b-8590-16ce29878d3b\" (UID: \"f1c26938-2f5c-448b-8590-16ce29878d3b\") " Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.095142 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/037a57cc-64cd-4b10-9c94-c609072db4f3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "037a57cc-64cd-4b10-9c94-c609072db4f3" (UID: "037a57cc-64cd-4b10-9c94-c609072db4f3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.095469 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34ae275d-679d-40f9-883c-f72b76d821fe-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "34ae275d-679d-40f9-883c-f72b76d821fe" (UID: "34ae275d-679d-40f9-883c-f72b76d821fe"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.095809 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1c26938-2f5c-448b-8590-16ce29878d3b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f1c26938-2f5c-448b-8590-16ce29878d3b" (UID: "f1c26938-2f5c-448b-8590-16ce29878d3b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.096260 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7bc1bcf4-064f-4460-8d8f-5e619d79dbba" (UID: "7bc1bcf4-064f-4460-8d8f-5e619d79dbba"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.098970 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba" (UID: "b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.099122 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34ae275d-679d-40f9-883c-f72b76d821fe-kube-api-access-k7kc2" (OuterVolumeSpecName: "kube-api-access-k7kc2") pod "34ae275d-679d-40f9-883c-f72b76d821fe" (UID: "34ae275d-679d-40f9-883c-f72b76d821fe"). InnerVolumeSpecName "kube-api-access-k7kc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.099621 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1c26938-2f5c-448b-8590-16ce29878d3b-kube-api-access-stnsz" (OuterVolumeSpecName: "kube-api-access-stnsz") pod "f1c26938-2f5c-448b-8590-16ce29878d3b" (UID: "f1c26938-2f5c-448b-8590-16ce29878d3b"). InnerVolumeSpecName "kube-api-access-stnsz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.099659 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-kube-api-access-prdfp" (OuterVolumeSpecName: "kube-api-access-prdfp") pod "7bc1bcf4-064f-4460-8d8f-5e619d79dbba" (UID: "7bc1bcf4-064f-4460-8d8f-5e619d79dbba"). InnerVolumeSpecName "kube-api-access-prdfp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.100174 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-kube-api-access-xkc4l" (OuterVolumeSpecName: "kube-api-access-xkc4l") pod "b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba" (UID: "b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba"). InnerVolumeSpecName "kube-api-access-xkc4l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.101395 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/037a57cc-64cd-4b10-9c94-c609072db4f3-kube-api-access-jkd75" (OuterVolumeSpecName: "kube-api-access-jkd75") pod "037a57cc-64cd-4b10-9c94-c609072db4f3" (UID: "037a57cc-64cd-4b10-9c94-c609072db4f3"). InnerVolumeSpecName "kube-api-access-jkd75". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.158139 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-z5wvn-config-qjf5c"] Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.195650 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34ae275d-679d-40f9-883c-f72b76d821fe-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.195684 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkc4l\" (UniqueName: \"kubernetes.io/projected/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-kube-api-access-xkc4l\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.195695 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7kc2\" (UniqueName: \"kubernetes.io/projected/34ae275d-679d-40f9-883c-f72b76d821fe-kube-api-access-k7kc2\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.195704 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.195712 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkd75\" (UniqueName: \"kubernetes.io/projected/037a57cc-64cd-4b10-9c94-c609072db4f3-kube-api-access-jkd75\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.195720 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stnsz\" (UniqueName: \"kubernetes.io/projected/f1c26938-2f5c-448b-8590-16ce29878d3b-kube-api-access-stnsz\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.195731 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/037a57cc-64cd-4b10-9c94-c609072db4f3-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.195740 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1c26938-2f5c-448b-8590-16ce29878d3b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.195750 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.195758 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-prdfp\" (UniqueName: \"kubernetes.io/projected/7bc1bcf4-064f-4460-8d8f-5e619d79dbba-kube-api-access-prdfp\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.796153 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qpnsr" event={"ID":"d8180980-2786-4514-a34e-4d68009ea724","Type":"ContainerStarted","Data":"2aa3ba3724453a514e3901ddec1d2c614029a3617223ce7a100d3128aa058a01"} Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.799201 4861 generic.go:334] "Generic (PLEG): container finished" podID="c1c0fb54-9086-4393-866d-ad41d8729302" containerID="7691ae12746c8e5b7624749ffb2b6ab9a340077db2194de6534ca14f13adf6b7" 
exitCode=0 Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.799283 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-z5wvn-config-qjf5c" event={"ID":"c1c0fb54-9086-4393-866d-ad41d8729302","Type":"ContainerDied","Data":"7691ae12746c8e5b7624749ffb2b6ab9a340077db2194de6534ca14f13adf6b7"} Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.799659 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-z5wvn-config-qjf5c" event={"ID":"c1c0fb54-9086-4393-866d-ad41d8729302","Type":"ContainerStarted","Data":"20ef3f88f26df69155996be3a541f28528598931b5bff145959c523dc29a0f35"} Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.805603 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-r5hz4" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.805621 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-20e6-account-create-update-g8kvq" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.805687 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-ngprs" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.805598 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"f247857b6eb8ade45650fa7fc5c2b6bff1ac506097b24f9f3cdf86be8a43d2d4"} Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.805775 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"c2889e92275d93552a69c4569021d1f48b14b5ad80332e996fa65c8fc322719d"} Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.805796 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"90931a6cfdb8a44357367186d2c4396fd4c9ac22d948ca358a02706b89784468"} Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.805815 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"27fc60fdd9d503cf21c40b0704ab3f668d5965d81d74cbbc4c3aa6e2ce528d23"} Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.805847 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3399-account-create-update-sg992" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.805859 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dfed-account-create-update-hvzjq" Jan 29 06:54:31 crc kubenswrapper[4861]: I0129 06:54:31.819969 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-qpnsr" podStartSLOduration=8.185643382 podStartE2EDuration="13.819947224s" podCreationTimestamp="2026-01-29 06:54:18 +0000 UTC" firstStartedPulling="2026-01-29 06:54:25.119426587 +0000 UTC m=+1156.790921144" lastFinishedPulling="2026-01-29 06:54:30.753730409 +0000 UTC m=+1162.425224986" observedRunningTime="2026-01-29 06:54:31.81701565 +0000 UTC m=+1163.488510227" watchObservedRunningTime="2026-01-29 06:54:31.819947224 +0000 UTC m=+1163.491441791" Jan 29 06:54:32 crc kubenswrapper[4861]: I0129 06:54:32.813776 4861 generic.go:334] "Generic (PLEG): container finished" podID="505a3759-b9c9-48ed-b63c-cc0b1e253fe5" containerID="e80f9259bf11bb94852f82dccdace8a12c0dd218cca38f8e88d6fe6d72cabee2" exitCode=0 Jan 29 06:54:32 crc kubenswrapper[4861]: I0129 06:54:32.814142 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-psncj" event={"ID":"505a3759-b9c9-48ed-b63c-cc0b1e253fe5","Type":"ContainerDied","Data":"e80f9259bf11bb94852f82dccdace8a12c0dd218cca38f8e88d6fe6d72cabee2"} Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.180904 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.331658 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-log-ovn\") pod \"c1c0fb54-9086-4393-866d-ad41d8729302\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.331822 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "c1c0fb54-9086-4393-866d-ad41d8729302" (UID: "c1c0fb54-9086-4393-866d-ad41d8729302"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.332169 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-additional-scripts\") pod \"c1c0fb54-9086-4393-866d-ad41d8729302\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.332352 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run\") pod \"c1c0fb54-9086-4393-866d-ad41d8729302\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.332512 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6d97d\" (UniqueName: \"kubernetes.io/projected/c1c0fb54-9086-4393-866d-ad41d8729302-kube-api-access-6d97d\") pod \"c1c0fb54-9086-4393-866d-ad41d8729302\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.332541 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run-ovn\") pod \"c1c0fb54-9086-4393-866d-ad41d8729302\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.332591 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-scripts\") pod \"c1c0fb54-9086-4393-866d-ad41d8729302\" (UID: \"c1c0fb54-9086-4393-866d-ad41d8729302\") " Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.333197 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "c1c0fb54-9086-4393-866d-ad41d8729302" (UID: "c1c0fb54-9086-4393-866d-ad41d8729302"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.333240 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run" (OuterVolumeSpecName: "var-run") pod "c1c0fb54-9086-4393-866d-ad41d8729302" (UID: "c1c0fb54-9086-4393-866d-ad41d8729302"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.333396 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "c1c0fb54-9086-4393-866d-ad41d8729302" (UID: "c1c0fb54-9086-4393-866d-ad41d8729302"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.334434 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-scripts" (OuterVolumeSpecName: "scripts") pod "c1c0fb54-9086-4393-866d-ad41d8729302" (UID: "c1c0fb54-9086-4393-866d-ad41d8729302"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.337517 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1c0fb54-9086-4393-866d-ad41d8729302-kube-api-access-6d97d" (OuterVolumeSpecName: "kube-api-access-6d97d") pod "c1c0fb54-9086-4393-866d-ad41d8729302" (UID: "c1c0fb54-9086-4393-866d-ad41d8729302"). InnerVolumeSpecName "kube-api-access-6d97d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.341805 4861 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.341844 4861 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.341857 4861 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.341872 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6d97d\" (UniqueName: \"kubernetes.io/projected/c1c0fb54-9086-4393-866d-ad41d8729302-kube-api-access-6d97d\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.341881 4861 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c1c0fb54-9086-4393-866d-ad41d8729302-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.341890 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c1c0fb54-9086-4393-866d-ad41d8729302-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.824809 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-z5wvn-config-qjf5c" event={"ID":"c1c0fb54-9086-4393-866d-ad41d8729302","Type":"ContainerDied","Data":"20ef3f88f26df69155996be3a541f28528598931b5bff145959c523dc29a0f35"} Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.824848 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20ef3f88f26df69155996be3a541f28528598931b5bff145959c523dc29a0f35" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.824882 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-z5wvn-config-qjf5c" Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.828917 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"3281266ddbd401b2f04a7cb7e231cd35c5bced4b7f65472c79c6cab82698c818"} Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.828948 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"18d6ec1b3d371c36c925fb4104455a8183e0a1995e0abd435a9954ffab121835"} Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.828960 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"42fe23b69a4684b68ede63233c8ea85578f5383ad1505896e099548f6e44a6ea"} Jan 29 06:54:33 crc kubenswrapper[4861]: I0129 06:54:33.828968 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"5f0359b2c69c9a01c0a74bbb8ecc34b7cb21acbd0a142f267a70aaf243d0d4d1"} Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.252665 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-psncj" Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.276782 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-z5wvn-config-qjf5c"] Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.287178 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-z5wvn-config-qjf5c"] Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.358353 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-db-sync-config-data\") pod \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.358600 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppphp\" (UniqueName: \"kubernetes.io/projected/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-kube-api-access-ppphp\") pod \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.358698 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-config-data\") pod \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.358787 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-combined-ca-bundle\") pod \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\" (UID: \"505a3759-b9c9-48ed-b63c-cc0b1e253fe5\") " Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.376396 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "505a3759-b9c9-48ed-b63c-cc0b1e253fe5" (UID: 
"505a3759-b9c9-48ed-b63c-cc0b1e253fe5"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.376498 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-kube-api-access-ppphp" (OuterVolumeSpecName: "kube-api-access-ppphp") pod "505a3759-b9c9-48ed-b63c-cc0b1e253fe5" (UID: "505a3759-b9c9-48ed-b63c-cc0b1e253fe5"). InnerVolumeSpecName "kube-api-access-ppphp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.398766 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "505a3759-b9c9-48ed-b63c-cc0b1e253fe5" (UID: "505a3759-b9c9-48ed-b63c-cc0b1e253fe5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.431242 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-config-data" (OuterVolumeSpecName: "config-data") pod "505a3759-b9c9-48ed-b63c-cc0b1e253fe5" (UID: "505a3759-b9c9-48ed-b63c-cc0b1e253fe5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.460276 4861 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.460318 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppphp\" (UniqueName: \"kubernetes.io/projected/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-kube-api-access-ppphp\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.460330 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.460339 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/505a3759-b9c9-48ed-b63c-cc0b1e253fe5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.860771 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"696299cd0fa4bb5069c2910a6be63baa743730b8326a70bb3ffd8aa9d1c825ec"} Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.877057 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-psncj" event={"ID":"505a3759-b9c9-48ed-b63c-cc0b1e253fe5","Type":"ContainerDied","Data":"ad9aaab3c2125df4069b83fa96b23cab3ec420ca5ffa77202202d86781e042a1"} Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.877127 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad9aaab3c2125df4069b83fa96b23cab3ec420ca5ffa77202202d86781e042a1" Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.877239 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-psncj" Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.882315 4861 generic.go:334] "Generic (PLEG): container finished" podID="d8180980-2786-4514-a34e-4d68009ea724" containerID="2aa3ba3724453a514e3901ddec1d2c614029a3617223ce7a100d3128aa058a01" exitCode=0 Jan 29 06:54:34 crc kubenswrapper[4861]: I0129 06:54:34.882373 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qpnsr" event={"ID":"d8180980-2786-4514-a34e-4d68009ea724","Type":"ContainerDied","Data":"2aa3ba3724453a514e3901ddec1d2c614029a3617223ce7a100d3128aa058a01"} Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.124465 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1c0fb54-9086-4393-866d-ad41d8729302" path="/var/lib/kubelet/pods/c1c0fb54-9086-4393-866d-ad41d8729302/volumes" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.253491 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-96x4v"] Jan 29 06:54:35 crc kubenswrapper[4861]: E0129 06:54:35.253796 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba" containerName="mariadb-database-create" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.253811 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba" containerName="mariadb-database-create" Jan 29 06:54:35 crc kubenswrapper[4861]: E0129 06:54:35.253831 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="505a3759-b9c9-48ed-b63c-cc0b1e253fe5" containerName="glance-db-sync" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.253837 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="505a3759-b9c9-48ed-b63c-cc0b1e253fe5" containerName="glance-db-sync" Jan 29 06:54:35 crc kubenswrapper[4861]: E0129 06:54:35.253849 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1c0fb54-9086-4393-866d-ad41d8729302" containerName="ovn-config" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.253855 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1c0fb54-9086-4393-866d-ad41d8729302" containerName="ovn-config" Jan 29 06:54:35 crc kubenswrapper[4861]: E0129 06:54:35.253867 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34ae275d-679d-40f9-883c-f72b76d821fe" containerName="mariadb-database-create" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.253873 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="34ae275d-679d-40f9-883c-f72b76d821fe" containerName="mariadb-database-create" Jan 29 06:54:35 crc kubenswrapper[4861]: E0129 06:54:35.253882 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bc1bcf4-064f-4460-8d8f-5e619d79dbba" containerName="mariadb-account-create-update" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.253887 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bc1bcf4-064f-4460-8d8f-5e619d79dbba" containerName="mariadb-account-create-update" Jan 29 06:54:35 crc kubenswrapper[4861]: E0129 06:54:35.253900 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="037a57cc-64cd-4b10-9c94-c609072db4f3" containerName="mariadb-account-create-update" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.253906 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="037a57cc-64cd-4b10-9c94-c609072db4f3" containerName="mariadb-account-create-update" Jan 29 06:54:35 crc kubenswrapper[4861]: E0129 
06:54:35.253915 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1c26938-2f5c-448b-8590-16ce29878d3b" containerName="mariadb-account-create-update" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.253920 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1c26938-2f5c-448b-8590-16ce29878d3b" containerName="mariadb-account-create-update" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.254065 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="34ae275d-679d-40f9-883c-f72b76d821fe" containerName="mariadb-database-create" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.254097 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba" containerName="mariadb-database-create" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.254107 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="037a57cc-64cd-4b10-9c94-c609072db4f3" containerName="mariadb-account-create-update" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.254121 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="505a3759-b9c9-48ed-b63c-cc0b1e253fe5" containerName="glance-db-sync" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.254130 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1c26938-2f5c-448b-8590-16ce29878d3b" containerName="mariadb-account-create-update" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.254137 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1c0fb54-9086-4393-866d-ad41d8729302" containerName="ovn-config" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.254145 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bc1bcf4-064f-4460-8d8f-5e619d79dbba" containerName="mariadb-account-create-update" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.254860 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.268327 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-96x4v"] Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.383330 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-dns-svc\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.383674 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-sb\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.383705 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-config\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.383770 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqtl7\" (UniqueName: \"kubernetes.io/projected/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-kube-api-access-lqtl7\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.383846 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-nb\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.486108 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-nb\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.486154 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-dns-svc\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.486172 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-sb\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.486189 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-config\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.486259 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqtl7\" (UniqueName: \"kubernetes.io/projected/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-kube-api-access-lqtl7\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.487051 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-sb\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.487086 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-dns-svc\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.487233 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-config\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.487634 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-nb\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.505628 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqtl7\" (UniqueName: \"kubernetes.io/projected/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-kube-api-access-lqtl7\") pod \"dnsmasq-dns-6bfd654465-96x4v\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.639346 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.915375 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"300cdcb844a68c46fd719e6be6e862e7b417f885d1ab7289bf038801298b0951"} Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.915662 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"e2a3b495086295e31b7ed56c3d2932e3f985fccd26d8e9e239e77653b59a0d32"} Jan 29 06:54:35 crc kubenswrapper[4861]: I0129 06:54:35.915674 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"7fed2197542cb4f3117973c4387005866a5b3aa792d7b6f414b399fca8226503"} Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.270678 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-96x4v"] Jan 29 06:54:36 crc kubenswrapper[4861]: W0129 06:54:36.270924 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod674e0afa_f2e4_4ed3_8ff2_7ef7c761d38b.slice/crio-1ff05f024605d8c841bfe8a28e637520fa7588a704a712a1d7dfcd0cf126c9bf WatchSource:0}: Error finding container 1ff05f024605d8c841bfe8a28e637520fa7588a704a712a1d7dfcd0cf126c9bf: Status 404 returned error can't find the container with id 1ff05f024605d8c841bfe8a28e637520fa7588a704a712a1d7dfcd0cf126c9bf Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.368024 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qpnsr" Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.503212 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-combined-ca-bundle\") pod \"d8180980-2786-4514-a34e-4d68009ea724\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.503472 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-config-data\") pod \"d8180980-2786-4514-a34e-4d68009ea724\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.503603 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-698gs\" (UniqueName: \"kubernetes.io/projected/d8180980-2786-4514-a34e-4d68009ea724-kube-api-access-698gs\") pod \"d8180980-2786-4514-a34e-4d68009ea724\" (UID: \"d8180980-2786-4514-a34e-4d68009ea724\") " Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.508025 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8180980-2786-4514-a34e-4d68009ea724-kube-api-access-698gs" (OuterVolumeSpecName: "kube-api-access-698gs") pod "d8180980-2786-4514-a34e-4d68009ea724" (UID: "d8180980-2786-4514-a34e-4d68009ea724"). InnerVolumeSpecName "kube-api-access-698gs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.538093 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d8180980-2786-4514-a34e-4d68009ea724" (UID: "d8180980-2786-4514-a34e-4d68009ea724"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.550959 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-config-data" (OuterVolumeSpecName: "config-data") pod "d8180980-2786-4514-a34e-4d68009ea724" (UID: "d8180980-2786-4514-a34e-4d68009ea724"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.604866 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-698gs\" (UniqueName: \"kubernetes.io/projected/d8180980-2786-4514-a34e-4d68009ea724-kube-api-access-698gs\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.604898 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.604907 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8180980-2786-4514-a34e-4d68009ea724-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.951207 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"80b76bbf5574a1ffe9a28896fadf09a48689fb5bb78991c8c124528c6850d0ee"} Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.953135 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-qpnsr" Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.953132 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qpnsr" event={"ID":"d8180980-2786-4514-a34e-4d68009ea724","Type":"ContainerDied","Data":"d8cf16feba3abbec18af8ffb7f360cc3ad7b4b375518d050f9d68161176a1e4a"} Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.953362 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8cf16feba3abbec18af8ffb7f360cc3ad7b4b375518d050f9d68161176a1e4a" Jan 29 06:54:36 crc kubenswrapper[4861]: I0129 06:54:36.954543 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bfd654465-96x4v" event={"ID":"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b","Type":"ContainerStarted","Data":"1ff05f024605d8c841bfe8a28e637520fa7588a704a712a1d7dfcd0cf126c9bf"} Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.138244 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-96x4v"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.158547 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-m2rjw"] Jan 29 06:54:37 crc kubenswrapper[4861]: E0129 06:54:37.159015 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8180980-2786-4514-a34e-4d68009ea724" containerName="keystone-db-sync" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.159038 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8180980-2786-4514-a34e-4d68009ea724" containerName="keystone-db-sync" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.159240 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8180980-2786-4514-a34e-4d68009ea724" containerName="keystone-db-sync" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.159798 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.162639 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-m5l24"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.166057 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.169417 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xp8pr" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.170220 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.170423 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.170568 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.176095 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.176677 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-m2rjw"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.205111 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-m5l24"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.313552 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-credential-keys\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.313601 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-fernet-keys\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.313635 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-nb\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.313684 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-scripts\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.313705 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp9ms\" (UniqueName: \"kubernetes.io/projected/be608de7-3ef1-4706-9527-d216de787b20-kube-api-access-pp9ms\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.313724 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7ppl\" (UniqueName: \"kubernetes.io/projected/e173b03d-2076-4c17-9ff5-1b35da3c6af1-kube-api-access-n7ppl\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: 
\"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.313743 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-config-data\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.313785 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-combined-ca-bundle\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.313806 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-config\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.313822 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-dns-svc\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.313839 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-sb\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.343220 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-8b6sf"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.344932 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.353477 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.353554 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-p72xd" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.353594 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.383384 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-8b6sf"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.391728 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-c5jkq"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.392812 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.406939 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.407293 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.414987 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-nb\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.415066 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-scripts\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.415100 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp9ms\" (UniqueName: \"kubernetes.io/projected/be608de7-3ef1-4706-9527-d216de787b20-kube-api-access-pp9ms\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.415118 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7ppl\" (UniqueName: \"kubernetes.io/projected/e173b03d-2076-4c17-9ff5-1b35da3c6af1-kube-api-access-n7ppl\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.415138 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-config-data\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.415181 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-combined-ca-bundle\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.415200 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-config\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.415215 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-dns-svc\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.415229 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-sb\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.415262 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-credential-keys\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.415283 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-fernet-keys\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.415864 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-nb\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.418200 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-bhr6h" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.418975 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-sb\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.419054 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-dns-svc\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.419562 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-config\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.426416 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.433467 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-fernet-keys\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.433727 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-config-data\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " 
pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.434015 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-combined-ca-bundle\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.434888 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.435144 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pp9ms\" (UniqueName: \"kubernetes.io/projected/be608de7-3ef1-4706-9527-d216de787b20-kube-api-access-pp9ms\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.436171 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.438517 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.447641 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-scripts\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.459501 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7ppl\" (UniqueName: \"kubernetes.io/projected/e173b03d-2076-4c17-9ff5-1b35da3c6af1-kube-api-access-n7ppl\") pod \"dnsmasq-dns-99559fbf5-m5l24\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.474171 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-c5jkq"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.487471 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-credential-keys\") pod \"keystone-bootstrap-m2rjw\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.488570 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.506520 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.511749 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.525671 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-db-sync-config-data\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.525729 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-config\") pod \"neutron-db-sync-8b6sf\" (UID: \"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74\") " pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.525759 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-scripts\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.525784 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-combined-ca-bundle\") pod \"neutron-db-sync-8b6sf\" (UID: \"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74\") " pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.525842 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-config-data\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.525860 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjjx5\" (UniqueName: \"kubernetes.io/projected/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-kube-api-access-mjjx5\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.525878 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-log-httpd\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.525899 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.525924 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c6201c8-50a8-4b95-82e0-b944b78348d6-etc-machine-id\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 
06:54:37.525942 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-config-data\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.526033 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtz68\" (UniqueName: \"kubernetes.io/projected/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-kube-api-access-jtz68\") pod \"neutron-db-sync-8b6sf\" (UID: \"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74\") " pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.526062 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.526098 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-scripts\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.526116 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-run-httpd\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.526150 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-combined-ca-bundle\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.526181 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rv4cd\" (UniqueName: \"kubernetes.io/projected/8c6201c8-50a8-4b95-82e0-b944b78348d6-kube-api-access-rv4cd\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.598174 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-m5l24"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.629918 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-db-sync-config-data\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.629977 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-config\") pod \"neutron-db-sync-8b6sf\" (UID: \"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74\") " 
pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630006 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-scripts\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630031 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-combined-ca-bundle\") pod \"neutron-db-sync-8b6sf\" (UID: \"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74\") " pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630087 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-config-data\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630114 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjjx5\" (UniqueName: \"kubernetes.io/projected/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-kube-api-access-mjjx5\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630135 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-log-httpd\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630160 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630193 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c6201c8-50a8-4b95-82e0-b944b78348d6-etc-machine-id\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630213 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-config-data\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630237 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtz68\" (UniqueName: \"kubernetes.io/projected/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-kube-api-access-jtz68\") pod \"neutron-db-sync-8b6sf\" (UID: \"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74\") " pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630261 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630282 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-scripts\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630299 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-run-httpd\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630335 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-combined-ca-bundle\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.630351 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rv4cd\" (UniqueName: \"kubernetes.io/projected/8c6201c8-50a8-4b95-82e0-b944b78348d6-kube-api-access-rv4cd\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.636739 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-run-httpd\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.646240 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c6201c8-50a8-4b95-82e0-b944b78348d6-etc-machine-id\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.647115 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-combined-ca-bundle\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.647372 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-log-httpd\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.647411 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-config-data\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.648296 4861 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/barbican-db-sync-x75d6"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.650307 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x75d6" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.652352 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rv4cd\" (UniqueName: \"kubernetes.io/projected/8c6201c8-50a8-4b95-82e0-b944b78348d6-kube-api-access-rv4cd\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.654582 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-combined-ca-bundle\") pod \"neutron-db-sync-8b6sf\" (UID: \"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74\") " pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.655560 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-scripts\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.656559 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.656629 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.656637 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.656946 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-db-sync-config-data\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.657290 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-x75d6"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.662208 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-config-data\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.663465 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-brpvh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.672250 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-g89td"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.673240 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.673374 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjjx5\" (UniqueName: \"kubernetes.io/projected/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-kube-api-access-mjjx5\") pod \"ceilometer-0\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.674334 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtz68\" (UniqueName: \"kubernetes.io/projected/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-kube-api-access-jtz68\") pod \"neutron-db-sync-8b6sf\" (UID: \"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74\") " pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.676703 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-t2jgb" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.676790 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-config\") pod \"neutron-db-sync-8b6sf\" (UID: \"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74\") " pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.676915 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.677098 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.677118 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-scripts\") pod \"cinder-db-sync-c5jkq\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.681480 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-g89td"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.686153 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.690508 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-ksmkh"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.691875 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.725798 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-ksmkh"] Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.742409 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844228 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-dns-svc\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844479 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-combined-ca-bundle\") pod \"barbican-db-sync-x75d6\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " pod="openstack/barbican-db-sync-x75d6" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844537 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-db-sync-config-data\") pod \"barbican-db-sync-x75d6\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " pod="openstack/barbican-db-sync-x75d6" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844557 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-scripts\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844579 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-nb\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844599 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-combined-ca-bundle\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844615 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-config\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844638 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xflmb\" (UniqueName: \"kubernetes.io/projected/d200c5c2-f7a9-4db9-b65c-18658065131d-kube-api-access-xflmb\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844675 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrljs\" (UniqueName: \"kubernetes.io/projected/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-kube-api-access-jrljs\") pod 
\"barbican-db-sync-x75d6\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " pod="openstack/barbican-db-sync-x75d6" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844713 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-sb\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844742 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d200c5c2-f7a9-4db9-b65c-18658065131d-logs\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844769 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-config-data\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.844795 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbdq7\" (UniqueName: \"kubernetes.io/projected/3af738ab-b461-4f39-981a-0375787c2c64-kube-api-access-rbdq7\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.946753 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-combined-ca-bundle\") pod \"barbican-db-sync-x75d6\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " pod="openstack/barbican-db-sync-x75d6" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.946835 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-db-sync-config-data\") pod \"barbican-db-sync-x75d6\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " pod="openstack/barbican-db-sync-x75d6" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.946853 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-scripts\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.946879 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-nb\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.946897 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-combined-ca-bundle\") pod \"placement-db-sync-g89td\" (UID: 
\"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.946918 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-config\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.946941 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xflmb\" (UniqueName: \"kubernetes.io/projected/d200c5c2-f7a9-4db9-b65c-18658065131d-kube-api-access-xflmb\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.946965 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrljs\" (UniqueName: \"kubernetes.io/projected/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-kube-api-access-jrljs\") pod \"barbican-db-sync-x75d6\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " pod="openstack/barbican-db-sync-x75d6" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.946994 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-sb\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.947024 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d200c5c2-f7a9-4db9-b65c-18658065131d-logs\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.947045 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-config-data\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.947062 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbdq7\" (UniqueName: \"kubernetes.io/projected/3af738ab-b461-4f39-981a-0375787c2c64-kube-api-access-rbdq7\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.947101 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-dns-svc\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.947953 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-dns-svc\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 
06:54:37.950388 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d200c5c2-f7a9-4db9-b65c-18658065131d-logs\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.951807 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-sb\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.952417 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.952513 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-nb\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.954846 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-config\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.958121 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-combined-ca-bundle\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.959239 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-db-sync-config-data\") pod \"barbican-db-sync-x75d6\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " pod="openstack/barbican-db-sync-x75d6" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.975794 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrljs\" (UniqueName: \"kubernetes.io/projected/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-kube-api-access-jrljs\") pod \"barbican-db-sync-x75d6\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " pod="openstack/barbican-db-sync-x75d6" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.981268 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xflmb\" (UniqueName: \"kubernetes.io/projected/d200c5c2-f7a9-4db9-b65c-18658065131d-kube-api-access-xflmb\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.984096 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-config-data\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.985064 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-scripts\") pod \"placement-db-sync-g89td\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " pod="openstack/placement-db-sync-g89td" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.985652 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-combined-ca-bundle\") pod \"barbican-db-sync-x75d6\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " pod="openstack/barbican-db-sync-x75d6" Jan 29 06:54:37 crc kubenswrapper[4861]: I0129 06:54:37.988738 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbdq7\" (UniqueName: \"kubernetes.io/projected/3af738ab-b461-4f39-981a-0375787c2c64-kube-api-access-rbdq7\") pod \"dnsmasq-dns-67f84f7cd9-ksmkh\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.039653 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x75d6" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.078383 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"849976197be27f3f0414f54d8c975813716a50cdd59b37975b4eb4bb0b453c69"} Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.090343 4861 generic.go:334] "Generic (PLEG): container finished" podID="674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b" containerID="9cca2e1a61b0b75fd9bcfebdf3e069afd15538d25826c872d043bad975a11e14" exitCode=0 Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.090389 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bfd654465-96x4v" event={"ID":"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b","Type":"ContainerDied","Data":"9cca2e1a61b0b75fd9bcfebdf3e069afd15538d25826c872d043bad975a11e14"} Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.130439 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-g89td" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.154883 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.160539 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-m5l24"] Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.315718 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.317824 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.321162 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.321196 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-nnz82" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.321820 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.324521 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-m2rjw"] Jan 29 06:54:38 crc kubenswrapper[4861]: W0129 06:54:38.328495 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe608de7_3ef1_4706_9527_d216de787b20.slice/crio-f7a6980694cad2dcdc321f0dfc053fd4876c3c54dbc87f508ac7f9e7b53e52bc WatchSource:0}: Error finding container f7a6980694cad2dcdc321f0dfc053fd4876c3c54dbc87f508ac7f9e7b53e52bc: Status 404 returned error can't find the container with id f7a6980694cad2dcdc321f0dfc053fd4876c3c54dbc87f508ac7f9e7b53e52bc Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.354489 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.368152 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.375823 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.378030 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.418489 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.466186 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-logs\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.466541 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzd2x\" (UniqueName: \"kubernetes.io/projected/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-kube-api-access-bzd2x\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.466572 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.466704 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-scripts\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.466760 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5rqd\" (UniqueName: \"kubernetes.io/projected/a88b567a-ded6-44c8-8736-7ac3ccc665f7-kube-api-access-g5rqd\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.466806 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-config-data\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.466873 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.466954 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.467002 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.468337 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.468407 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.468447 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.468507 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-logs\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.468533 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.569997 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.570043 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.570065 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.570112 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.570136 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-logs\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.570154 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.570186 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-logs\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.570215 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzd2x\" (UniqueName: 
\"kubernetes.io/projected/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-kube-api-access-bzd2x\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.570238 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.570254 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-scripts\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.571531 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-logs\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.572037 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5rqd\" (UniqueName: \"kubernetes.io/projected/a88b567a-ded6-44c8-8736-7ac3ccc665f7-kube-api-access-g5rqd\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.572107 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-config-data\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.572162 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.572197 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.572793 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.574264 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: 
\"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.574298 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.577617 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-logs\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.577857 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.578122 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.588570 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.595034 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.599156 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.599654 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-scripts\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.600141 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-config-data\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc 
kubenswrapper[4861]: I0129 06:54:38.602919 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzd2x\" (UniqueName: \"kubernetes.io/projected/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-kube-api-access-bzd2x\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.613095 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5rqd\" (UniqueName: \"kubernetes.io/projected/a88b567a-ded6-44c8-8736-7ac3ccc665f7-kube-api-access-g5rqd\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.639936 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-8b6sf"] Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.640968 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.649906 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:38 crc kubenswrapper[4861]: W0129 06:54:38.650643 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod597c5a3a_501d_40ab_9e2c_0ed9fb3dcc74.slice/crio-3dd6e097122ebfee4673607e08493473c5a4f905de2518d38689bb9f4491821c WatchSource:0}: Error finding container 3dd6e097122ebfee4673607e08493473c5a4f905de2518d38689bb9f4491821c: Status 404 returned error can't find the container with id 3dd6e097122ebfee4673607e08493473c5a4f905de2518d38689bb9f4491821c Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.655813 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.673328 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.703623 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.774962 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-c5jkq"] Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.775545 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-sb\") pod \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.775599 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqtl7\" (UniqueName: \"kubernetes.io/projected/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-kube-api-access-lqtl7\") pod \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.775638 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-config\") pod \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.775717 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-nb\") pod \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.775819 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-dns-svc\") pod \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\" (UID: \"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b\") " Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.797155 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-kube-api-access-lqtl7" (OuterVolumeSpecName: "kube-api-access-lqtl7") pod "674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b" (UID: "674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b"). InnerVolumeSpecName "kube-api-access-lqtl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.802035 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b" (UID: "674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.802563 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b" (UID: "674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.802895 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-config" (OuterVolumeSpecName: "config") pod "674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b" (UID: "674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.813320 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b" (UID: "674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.878552 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.878789 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.878801 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqtl7\" (UniqueName: \"kubernetes.io/projected/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-kube-api-access-lqtl7\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.878810 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.878818 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:38 crc kubenswrapper[4861]: I0129 06:54:38.960218 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.142920 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-g89td"] Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.142945 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-x75d6"] Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.142954 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-c5jkq" event={"ID":"8c6201c8-50a8-4b95-82e0-b944b78348d6","Type":"ContainerStarted","Data":"5e62b0f2857da3bd0b6030be8d4998377760485d905fdb602e7adde9d648cbfb"} Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.142968 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerStarted","Data":"0f6ad2c5dcab8a4a865c78703af5ec17abaa3949c079878716530d1bf7fd0391"} Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.142982 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7de4c2d2-3468-4522-8f5d-5acf0d1c4806","Type":"ContainerStarted","Data":"ad6f3438c905e90b73f506c79f0dadb7c5fbbe874aed3ca2c0cff532317759b9"} Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.164584 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m2rjw" event={"ID":"be608de7-3ef1-4706-9527-d216de787b20","Type":"ContainerStarted","Data":"39b1268ad20f3405e1e31dcaa8ad34a915ba32a7d48ac681513976c40074ca2f"} Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.164631 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m2rjw" event={"ID":"be608de7-3ef1-4706-9527-d216de787b20","Type":"ContainerStarted","Data":"f7a6980694cad2dcdc321f0dfc053fd4876c3c54dbc87f508ac7f9e7b53e52bc"} Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.197568 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8b6sf" event={"ID":"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74","Type":"ContainerStarted","Data":"ba7f7b8aa0a69ef1edc92b25a04e8ad86840de8d862f70596abc49240736dd9c"} Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.197611 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8b6sf" event={"ID":"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74","Type":"ContainerStarted","Data":"3dd6e097122ebfee4673607e08493473c5a4f905de2518d38689bb9f4491821c"} Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.210527 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bfd654465-96x4v" event={"ID":"674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b","Type":"ContainerDied","Data":"1ff05f024605d8c841bfe8a28e637520fa7588a704a712a1d7dfcd0cf126c9bf"} Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.210786 4861 scope.go:117] "RemoveContainer" containerID="9cca2e1a61b0b75fd9bcfebdf3e069afd15538d25826c872d043bad975a11e14" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.210971 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bfd654465-96x4v" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.218471 4861 generic.go:334] "Generic (PLEG): container finished" podID="e173b03d-2076-4c17-9ff5-1b35da3c6af1" containerID="ae528a5ee3b6de5de313827aec3e266dd50fdf13a74aeae2f1fbe4b2f1b8e0ae" exitCode=0 Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.218543 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-99559fbf5-m5l24" event={"ID":"e173b03d-2076-4c17-9ff5-1b35da3c6af1","Type":"ContainerDied","Data":"ae528a5ee3b6de5de313827aec3e266dd50fdf13a74aeae2f1fbe4b2f1b8e0ae"} Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.218568 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-99559fbf5-m5l24" event={"ID":"e173b03d-2076-4c17-9ff5-1b35da3c6af1","Type":"ContainerStarted","Data":"7d0741f9b28e73beb862fa3eefd6db5ebdbe5801bb97c048bd8e0f1a3431c70b"} Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.240239 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-ksmkh"] Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.327297 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=38.999046195 podStartE2EDuration="47.327277471s" podCreationTimestamp="2026-01-29 06:53:52 +0000 UTC" firstStartedPulling="2026-01-29 06:54:26.339012759 +0000 UTC m=+1158.010507316" lastFinishedPulling="2026-01-29 06:54:34.667244045 +0000 UTC m=+1166.338738592" observedRunningTime="2026-01-29 06:54:39.316589208 +0000 UTC m=+1170.988083765" watchObservedRunningTime="2026-01-29 06:54:39.327277471 +0000 UTC m=+1170.998772028" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.385641 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-m2rjw" podStartSLOduration=2.385619831 podStartE2EDuration="2.385619831s" podCreationTimestamp="2026-01-29 06:54:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:39.36873004 +0000 UTC m=+1171.040224617" watchObservedRunningTime="2026-01-29 06:54:39.385619831 +0000 UTC m=+1171.057114388" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.434127 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-96x4v"] Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.438198 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-96x4v"] Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.461231 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.622316 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-ksmkh"] Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.643657 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-j6bzt"] Jan 29 06:54:39 crc kubenswrapper[4861]: E0129 06:54:39.644023 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b" containerName="init" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.644035 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b" containerName="init" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.645994 
4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b" containerName="init" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.646842 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.656513 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.657473 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-j6bzt"] Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.682342 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.807481 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-nb\") pod \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.807523 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-dns-svc\") pod \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.807577 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-sb\") pod \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.807659 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7ppl\" (UniqueName: \"kubernetes.io/projected/e173b03d-2076-4c17-9ff5-1b35da3c6af1-kube-api-access-n7ppl\") pod \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.807703 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-config\") pod \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\" (UID: \"e173b03d-2076-4c17-9ff5-1b35da3c6af1\") " Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.808020 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-svc\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.808117 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.808159 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-hlf84\" (UniqueName: \"kubernetes.io/projected/fa374fd0-6b03-475d-8230-9fb2a9768091-kube-api-access-hlf84\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.808178 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.808197 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-config\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.808531 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.817104 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e173b03d-2076-4c17-9ff5-1b35da3c6af1-kube-api-access-n7ppl" (OuterVolumeSpecName: "kube-api-access-n7ppl") pod "e173b03d-2076-4c17-9ff5-1b35da3c6af1" (UID: "e173b03d-2076-4c17-9ff5-1b35da3c6af1"). InnerVolumeSpecName "kube-api-access-n7ppl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.831036 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e173b03d-2076-4c17-9ff5-1b35da3c6af1" (UID: "e173b03d-2076-4c17-9ff5-1b35da3c6af1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.840098 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e173b03d-2076-4c17-9ff5-1b35da3c6af1" (UID: "e173b03d-2076-4c17-9ff5-1b35da3c6af1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.853917 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-config" (OuterVolumeSpecName: "config") pod "e173b03d-2076-4c17-9ff5-1b35da3c6af1" (UID: "e173b03d-2076-4c17-9ff5-1b35da3c6af1"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.891780 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e173b03d-2076-4c17-9ff5-1b35da3c6af1" (UID: "e173b03d-2076-4c17-9ff5-1b35da3c6af1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.910343 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-svc\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.915249 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.915379 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlf84\" (UniqueName: \"kubernetes.io/projected/fa374fd0-6b03-475d-8230-9fb2a9768091-kube-api-access-hlf84\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.915419 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-config\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.915436 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.915461 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.915583 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7ppl\" (UniqueName: \"kubernetes.io/projected/e173b03d-2076-4c17-9ff5-1b35da3c6af1-kube-api-access-n7ppl\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.915596 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.915607 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.915616 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.915623 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e173b03d-2076-4c17-9ff5-1b35da3c6af1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.916353 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-config\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.916495 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.916998 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.915431 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-svc\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.917426 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:39 crc kubenswrapper[4861]: I0129 06:54:39.939119 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlf84\" (UniqueName: \"kubernetes.io/projected/fa374fd0-6b03-475d-8230-9fb2a9768091-kube-api-access-hlf84\") pod \"dnsmasq-dns-6f6f8cb849-j6bzt\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.006596 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.010106 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:54:40 crc kubenswrapper[4861]: W0129 06:54:40.042670 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b4cf8ff_a136_4ecb_bb04_b44475d3832b.slice/crio-ad05784e84ea463e29de0dab4109f5eb8d93a4c0cd36639365e1fecda34da32c WatchSource:0}: Error finding container ad05784e84ea463e29de0dab4109f5eb8d93a4c0cd36639365e1fecda34da32c: Status 404 returned error can't find the container with id ad05784e84ea463e29de0dab4109f5eb8d93a4c0cd36639365e1fecda34da32c Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.230834 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x75d6" event={"ID":"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe","Type":"ContainerStarted","Data":"c9348043bb67229cd9e7813a5bcaeed6d1baa8f1c993fb695c302adf02b83012"} Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.235019 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-99559fbf5-m5l24" event={"ID":"e173b03d-2076-4c17-9ff5-1b35da3c6af1","Type":"ContainerDied","Data":"7d0741f9b28e73beb862fa3eefd6db5ebdbe5801bb97c048bd8e0f1a3431c70b"} Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.235086 4861 scope.go:117] "RemoveContainer" containerID="ae528a5ee3b6de5de313827aec3e266dd50fdf13a74aeae2f1fbe4b2f1b8e0ae" Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.235100 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-99559fbf5-m5l24" Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.238015 4861 generic.go:334] "Generic (PLEG): container finished" podID="3af738ab-b461-4f39-981a-0375787c2c64" containerID="b18d8a3a28970581f54e1b3d38cdffcfb8298eaa5c39136dd7eefb911db2d039" exitCode=0 Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.238064 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" event={"ID":"3af738ab-b461-4f39-981a-0375787c2c64","Type":"ContainerDied","Data":"b18d8a3a28970581f54e1b3d38cdffcfb8298eaa5c39136dd7eefb911db2d039"} Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.238100 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" event={"ID":"3af738ab-b461-4f39-981a-0375787c2c64","Type":"ContainerStarted","Data":"b42b9db098b04b77821bd343cffd3b88b958f62533372d2345e1be8a30a302a5"} Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.269241 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b4cf8ff-a136-4ecb-bb04-b44475d3832b","Type":"ContainerStarted","Data":"ad05784e84ea463e29de0dab4109f5eb8d93a4c0cd36639365e1fecda34da32c"} Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.295785 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a88b567a-ded6-44c8-8736-7ac3ccc665f7","Type":"ContainerStarted","Data":"e8dc3752c3691b802ff940938f8100fdd805ec2f92eeefa6859f1b8786f16271"} Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.311964 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-g89td" 
event={"ID":"d200c5c2-f7a9-4db9-b65c-18658065131d","Type":"ContainerStarted","Data":"e40c7f0ae2a442b0075874a95657683c97638c0d6f8a94af8c2cac0422b7afb2"} Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.374961 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-8b6sf" podStartSLOduration=3.374944193 podStartE2EDuration="3.374944193s" podCreationTimestamp="2026-01-29 06:54:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:40.366734063 +0000 UTC m=+1172.038228630" watchObservedRunningTime="2026-01-29 06:54:40.374944193 +0000 UTC m=+1172.046438750" Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.446120 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-m5l24"] Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.531526 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-m5l24"] Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.616701 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-j6bzt"] Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.702362 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.842818 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:54:40 crc kubenswrapper[4861]: I0129 06:54:40.866444 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:54:41 crc kubenswrapper[4861]: I0129 06:54:41.139858 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b" path="/var/lib/kubelet/pods/674e0afa-f2e4-4ed3-8ff2-7ef7c761d38b/volumes" Jan 29 06:54:41 crc kubenswrapper[4861]: I0129 06:54:41.141059 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e173b03d-2076-4c17-9ff5-1b35da3c6af1" path="/var/lib/kubelet/pods/e173b03d-2076-4c17-9ff5-1b35da3c6af1/volumes" Jan 29 06:54:41 crc kubenswrapper[4861]: I0129 06:54:41.348405 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" event={"ID":"fa374fd0-6b03-475d-8230-9fb2a9768091","Type":"ContainerStarted","Data":"ddf002d2b21cac7beb20b97fe76c1d0bda80dbc4238c13fb2244922405a64059"} Jan 29 06:54:41 crc kubenswrapper[4861]: I0129 06:54:41.355112 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" event={"ID":"3af738ab-b461-4f39-981a-0375787c2c64","Type":"ContainerStarted","Data":"21e34faf66af2464541ef2fdb4a7db7840ebb64b71dc1262d81931d40a47236c"} Jan 29 06:54:41 crc kubenswrapper[4861]: I0129 06:54:41.355173 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" podUID="3af738ab-b461-4f39-981a-0375787c2c64" containerName="dnsmasq-dns" containerID="cri-o://21e34faf66af2464541ef2fdb4a7db7840ebb64b71dc1262d81931d40a47236c" gracePeriod=10 Jan 29 06:54:41 crc kubenswrapper[4861]: I0129 06:54:41.355213 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:41 crc kubenswrapper[4861]: I0129 06:54:41.361596 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"a88b567a-ded6-44c8-8736-7ac3ccc665f7","Type":"ContainerStarted","Data":"7578441fd0619de92995ab3b49ab32ffad625eeff9a2ad9ff3b70cbfc4b9e7c4"} Jan 29 06:54:41 crc kubenswrapper[4861]: I0129 06:54:41.373306 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" podStartSLOduration=4.373287874 podStartE2EDuration="4.373287874s" podCreationTimestamp="2026-01-29 06:54:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:41.369561628 +0000 UTC m=+1173.041056195" watchObservedRunningTime="2026-01-29 06:54:41.373287874 +0000 UTC m=+1173.044782431" Jan 29 06:54:42 crc kubenswrapper[4861]: I0129 06:54:42.376757 4861 generic.go:334] "Generic (PLEG): container finished" podID="3af738ab-b461-4f39-981a-0375787c2c64" containerID="21e34faf66af2464541ef2fdb4a7db7840ebb64b71dc1262d81931d40a47236c" exitCode=0 Jan 29 06:54:42 crc kubenswrapper[4861]: I0129 06:54:42.377008 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" event={"ID":"3af738ab-b461-4f39-981a-0375787c2c64","Type":"ContainerDied","Data":"21e34faf66af2464541ef2fdb4a7db7840ebb64b71dc1262d81931d40a47236c"} Jan 29 06:54:45 crc kubenswrapper[4861]: I0129 06:54:45.412680 4861 generic.go:334] "Generic (PLEG): container finished" podID="fa374fd0-6b03-475d-8230-9fb2a9768091" containerID="c722e0ac492dfce7d20f478396846ea28ca925b44be298b2806b3de4b106af32" exitCode=0 Jan 29 06:54:45 crc kubenswrapper[4861]: I0129 06:54:45.412790 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" event={"ID":"fa374fd0-6b03-475d-8230-9fb2a9768091","Type":"ContainerDied","Data":"c722e0ac492dfce7d20f478396846ea28ca925b44be298b2806b3de4b106af32"} Jan 29 06:54:45 crc kubenswrapper[4861]: I0129 06:54:45.418677 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b4cf8ff-a136-4ecb-bb04-b44475d3832b","Type":"ContainerStarted","Data":"4ed67a4d791aaf2962c06c57850309cf00bcc6e77e6f490c2fc39ff3df92af0c"} Jan 29 06:54:45 crc kubenswrapper[4861]: I0129 06:54:45.440342 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a88b567a-ded6-44c8-8736-7ac3ccc665f7","Type":"ContainerStarted","Data":"752086d556d86b2b95ab0e4ed9524ae139ddbea9ba153750505afc005bc1db9a"} Jan 29 06:54:45 crc kubenswrapper[4861]: I0129 06:54:45.440510 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="a88b567a-ded6-44c8-8736-7ac3ccc665f7" containerName="glance-log" containerID="cri-o://7578441fd0619de92995ab3b49ab32ffad625eeff9a2ad9ff3b70cbfc4b9e7c4" gracePeriod=30 Jan 29 06:54:45 crc kubenswrapper[4861]: I0129 06:54:45.440939 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="a88b567a-ded6-44c8-8736-7ac3ccc665f7" containerName="glance-httpd" containerID="cri-o://752086d556d86b2b95ab0e4ed9524ae139ddbea9ba153750505afc005bc1db9a" gracePeriod=30 Jan 29 06:54:46 crc kubenswrapper[4861]: I0129 06:54:46.456449 4861 generic.go:334] "Generic (PLEG): container finished" podID="be608de7-3ef1-4706-9527-d216de787b20" containerID="39b1268ad20f3405e1e31dcaa8ad34a915ba32a7d48ac681513976c40074ca2f" exitCode=0 Jan 29 06:54:46 crc kubenswrapper[4861]: I0129 06:54:46.456481 4861 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m2rjw" event={"ID":"be608de7-3ef1-4706-9527-d216de787b20","Type":"ContainerDied","Data":"39b1268ad20f3405e1e31dcaa8ad34a915ba32a7d48ac681513976c40074ca2f"} Jan 29 06:54:46 crc kubenswrapper[4861]: I0129 06:54:46.460659 4861 generic.go:334] "Generic (PLEG): container finished" podID="a88b567a-ded6-44c8-8736-7ac3ccc665f7" containerID="752086d556d86b2b95ab0e4ed9524ae139ddbea9ba153750505afc005bc1db9a" exitCode=0 Jan 29 06:54:46 crc kubenswrapper[4861]: I0129 06:54:46.460689 4861 generic.go:334] "Generic (PLEG): container finished" podID="a88b567a-ded6-44c8-8736-7ac3ccc665f7" containerID="7578441fd0619de92995ab3b49ab32ffad625eeff9a2ad9ff3b70cbfc4b9e7c4" exitCode=143 Jan 29 06:54:46 crc kubenswrapper[4861]: I0129 06:54:46.460714 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a88b567a-ded6-44c8-8736-7ac3ccc665f7","Type":"ContainerDied","Data":"752086d556d86b2b95ab0e4ed9524ae139ddbea9ba153750505afc005bc1db9a"} Jan 29 06:54:46 crc kubenswrapper[4861]: I0129 06:54:46.460756 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a88b567a-ded6-44c8-8736-7ac3ccc665f7","Type":"ContainerDied","Data":"7578441fd0619de92995ab3b49ab32ffad625eeff9a2ad9ff3b70cbfc4b9e7c4"} Jan 29 06:54:46 crc kubenswrapper[4861]: I0129 06:54:46.480147 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=9.480085171 podStartE2EDuration="9.480085171s" podCreationTimestamp="2026-01-29 06:54:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:45.468898232 +0000 UTC m=+1177.140392799" watchObservedRunningTime="2026-01-29 06:54:46.480085171 +0000 UTC m=+1178.151579728" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.396529 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.476078 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" event={"ID":"3af738ab-b461-4f39-981a-0375787c2c64","Type":"ContainerDied","Data":"b42b9db098b04b77821bd343cffd3b88b958f62533372d2345e1be8a30a302a5"} Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.476295 4861 scope.go:117] "RemoveContainer" containerID="21e34faf66af2464541ef2fdb4a7db7840ebb64b71dc1262d81931d40a47236c" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.476328 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.481310 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b4cf8ff-a136-4ecb-bb04-b44475d3832b","Type":"ContainerStarted","Data":"2493fc0811f212bdea381ebe85b017b90079190578fcb6cc79a84dfe2071ef12"} Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.481445 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5b4cf8ff-a136-4ecb-bb04-b44475d3832b" containerName="glance-log" containerID="cri-o://4ed67a4d791aaf2962c06c57850309cf00bcc6e77e6f490c2fc39ff3df92af0c" gracePeriod=30 Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.481557 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5b4cf8ff-a136-4ecb-bb04-b44475d3832b" containerName="glance-httpd" containerID="cri-o://2493fc0811f212bdea381ebe85b017b90079190578fcb6cc79a84dfe2071ef12" gracePeriod=30 Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.559536 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-config\") pod \"3af738ab-b461-4f39-981a-0375787c2c64\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.560239 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbdq7\" (UniqueName: \"kubernetes.io/projected/3af738ab-b461-4f39-981a-0375787c2c64-kube-api-access-rbdq7\") pod \"3af738ab-b461-4f39-981a-0375787c2c64\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.560502 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-dns-svc\") pod \"3af738ab-b461-4f39-981a-0375787c2c64\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.560677 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-sb\") pod \"3af738ab-b461-4f39-981a-0375787c2c64\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.560754 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-nb\") pod \"3af738ab-b461-4f39-981a-0375787c2c64\" (UID: \"3af738ab-b461-4f39-981a-0375787c2c64\") " Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.625281 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3af738ab-b461-4f39-981a-0375787c2c64-kube-api-access-rbdq7" (OuterVolumeSpecName: "kube-api-access-rbdq7") pod "3af738ab-b461-4f39-981a-0375787c2c64" (UID: "3af738ab-b461-4f39-981a-0375787c2c64"). InnerVolumeSpecName "kube-api-access-rbdq7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.664170 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbdq7\" (UniqueName: \"kubernetes.io/projected/3af738ab-b461-4f39-981a-0375787c2c64-kube-api-access-rbdq7\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.705574 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3af738ab-b461-4f39-981a-0375787c2c64" (UID: "3af738ab-b461-4f39-981a-0375787c2c64"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.705588 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3af738ab-b461-4f39-981a-0375787c2c64" (UID: "3af738ab-b461-4f39-981a-0375787c2c64"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.705638 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-config" (OuterVolumeSpecName: "config") pod "3af738ab-b461-4f39-981a-0375787c2c64" (UID: "3af738ab-b461-4f39-981a-0375787c2c64"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.705726 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3af738ab-b461-4f39-981a-0375787c2c64" (UID: "3af738ab-b461-4f39-981a-0375787c2c64"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.765910 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.765951 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.765965 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.765977 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3af738ab-b461-4f39-981a-0375787c2c64-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.834466 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=11.834449381 podStartE2EDuration="11.834449381s" podCreationTimestamp="2026-01-29 06:54:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:54:48.506537535 +0000 UTC m=+1180.178032092" watchObservedRunningTime="2026-01-29 06:54:48.834449381 +0000 UTC m=+1180.505943938" Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.847151 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-ksmkh"] Jan 29 06:54:48 crc kubenswrapper[4861]: I0129 06:54:48.853047 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-ksmkh"] Jan 29 06:54:49 crc kubenswrapper[4861]: I0129 06:54:49.125011 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3af738ab-b461-4f39-981a-0375787c2c64" path="/var/lib/kubelet/pods/3af738ab-b461-4f39-981a-0375787c2c64/volumes" Jan 29 06:54:49 crc kubenswrapper[4861]: I0129 06:54:49.497539 4861 generic.go:334] "Generic (PLEG): container finished" podID="5b4cf8ff-a136-4ecb-bb04-b44475d3832b" containerID="2493fc0811f212bdea381ebe85b017b90079190578fcb6cc79a84dfe2071ef12" exitCode=0 Jan 29 06:54:49 crc kubenswrapper[4861]: I0129 06:54:49.497576 4861 generic.go:334] "Generic (PLEG): container finished" podID="5b4cf8ff-a136-4ecb-bb04-b44475d3832b" containerID="4ed67a4d791aaf2962c06c57850309cf00bcc6e77e6f490c2fc39ff3df92af0c" exitCode=143 Jan 29 06:54:49 crc kubenswrapper[4861]: I0129 06:54:49.497600 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b4cf8ff-a136-4ecb-bb04-b44475d3832b","Type":"ContainerDied","Data":"2493fc0811f212bdea381ebe85b017b90079190578fcb6cc79a84dfe2071ef12"} Jan 29 06:54:49 crc kubenswrapper[4861]: I0129 06:54:49.497627 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b4cf8ff-a136-4ecb-bb04-b44475d3832b","Type":"ContainerDied","Data":"4ed67a4d791aaf2962c06c57850309cf00bcc6e77e6f490c2fc39ff3df92af0c"} Jan 29 06:54:53 crc kubenswrapper[4861]: I0129 06:54:53.156294 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-67f84f7cd9-ksmkh" 
podUID="3af738ab-b461-4f39-981a-0375787c2c64" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout" Jan 29 06:55:00 crc kubenswrapper[4861]: E0129 06:55:00.418876 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api@sha256:33f4e5f7a715d48482ec46a42267ea992fa268585303c4f1bd3cbea072a6348b" Jan 29 06:55:00 crc kubenswrapper[4861]: E0129 06:55:00.419577 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api@sha256:33f4e5f7a715d48482ec46a42267ea992fa268585303c4f1bd3cbea072a6348b,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xflmb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-g89td_openstack(d200c5c2-f7a9-4db9-b65c-18658065131d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 06:55:00 crc kubenswrapper[4861]: E0129 06:55:00.421086 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-g89td" podUID="d200c5c2-f7a9-4db9-b65c-18658065131d" Jan 29 06:55:00 crc kubenswrapper[4861]: E0129 06:55:00.613369 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/podified-antelope-centos9/openstack-placement-api@sha256:33f4e5f7a715d48482ec46a42267ea992fa268585303c4f1bd3cbea072a6348b\\\"\"" pod="openstack/placement-db-sync-g89td" podUID="d200c5c2-f7a9-4db9-b65c-18658065131d" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.494138 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.654657 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m2rjw" event={"ID":"be608de7-3ef1-4706-9527-d216de787b20","Type":"ContainerDied","Data":"f7a6980694cad2dcdc321f0dfc053fd4876c3c54dbc87f508ac7f9e7b53e52bc"} Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.654715 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7a6980694cad2dcdc321f0dfc053fd4876c3c54dbc87f508ac7f9e7b53e52bc" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.654789 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-m2rjw" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.674243 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-credential-keys\") pod \"be608de7-3ef1-4706-9527-d216de787b20\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.674359 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-combined-ca-bundle\") pod \"be608de7-3ef1-4706-9527-d216de787b20\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.674390 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-config-data\") pod \"be608de7-3ef1-4706-9527-d216de787b20\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.674454 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-fernet-keys\") pod \"be608de7-3ef1-4706-9527-d216de787b20\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.674488 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-scripts\") pod \"be608de7-3ef1-4706-9527-d216de787b20\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.674505 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pp9ms\" (UniqueName: \"kubernetes.io/projected/be608de7-3ef1-4706-9527-d216de787b20-kube-api-access-pp9ms\") pod \"be608de7-3ef1-4706-9527-d216de787b20\" (UID: \"be608de7-3ef1-4706-9527-d216de787b20\") " Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.714879 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be608de7-3ef1-4706-9527-d216de787b20-kube-api-access-pp9ms" (OuterVolumeSpecName: "kube-api-access-pp9ms") pod "be608de7-3ef1-4706-9527-d216de787b20" (UID: 
"be608de7-3ef1-4706-9527-d216de787b20"). InnerVolumeSpecName "kube-api-access-pp9ms". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.714853 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "be608de7-3ef1-4706-9527-d216de787b20" (UID: "be608de7-3ef1-4706-9527-d216de787b20"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.714961 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "be608de7-3ef1-4706-9527-d216de787b20" (UID: "be608de7-3ef1-4706-9527-d216de787b20"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.731240 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-scripts" (OuterVolumeSpecName: "scripts") pod "be608de7-3ef1-4706-9527-d216de787b20" (UID: "be608de7-3ef1-4706-9527-d216de787b20"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.771068 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-config-data" (OuterVolumeSpecName: "config-data") pod "be608de7-3ef1-4706-9527-d216de787b20" (UID: "be608de7-3ef1-4706-9527-d216de787b20"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.778013 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be608de7-3ef1-4706-9527-d216de787b20" (UID: "be608de7-3ef1-4706-9527-d216de787b20"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.779212 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.779248 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.779258 4861 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.779267 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.779276 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pp9ms\" (UniqueName: \"kubernetes.io/projected/be608de7-3ef1-4706-9527-d216de787b20-kube-api-access-pp9ms\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:02 crc kubenswrapper[4861]: I0129 06:55:02.779284 4861 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/be608de7-3ef1-4706-9527-d216de787b20-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.583050 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-m2rjw"] Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.590110 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-m2rjw"] Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.692062 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-xqbhr"] Jan 29 06:55:03 crc kubenswrapper[4861]: E0129 06:55:03.693583 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3af738ab-b461-4f39-981a-0375787c2c64" containerName="dnsmasq-dns" Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.693628 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3af738ab-b461-4f39-981a-0375787c2c64" containerName="dnsmasq-dns" Jan 29 06:55:03 crc kubenswrapper[4861]: E0129 06:55:03.693646 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be608de7-3ef1-4706-9527-d216de787b20" containerName="keystone-bootstrap" Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.693652 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="be608de7-3ef1-4706-9527-d216de787b20" containerName="keystone-bootstrap" Jan 29 06:55:03 crc kubenswrapper[4861]: E0129 06:55:03.693671 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e173b03d-2076-4c17-9ff5-1b35da3c6af1" containerName="init" Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.693677 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e173b03d-2076-4c17-9ff5-1b35da3c6af1" containerName="init" Jan 29 06:55:03 crc kubenswrapper[4861]: E0129 06:55:03.693712 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3af738ab-b461-4f39-981a-0375787c2c64" containerName="init" Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.693719 4861 
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.693719 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3af738ab-b461-4f39-981a-0375787c2c64" containerName="init"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.693955 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="3af738ab-b461-4f39-981a-0375787c2c64" containerName="dnsmasq-dns"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.693971 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="be608de7-3ef1-4706-9527-d216de787b20" containerName="keystone-bootstrap"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.693990 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e173b03d-2076-4c17-9ff5-1b35da3c6af1" containerName="init"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.694731 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.699603 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.699662 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.699708 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.699603 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.700820 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xp8pr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.708224 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-xqbhr"]
Jan 29 06:55:03 crc kubenswrapper[4861]: E0129 06:55:03.786311 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49"
Jan 29 06:55:03 crc kubenswrapper[4861]: E0129 06:55:03.786560 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rv4cd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-c5jkq_openstack(8c6201c8-50a8-4b95-82e0-b944b78348d6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 29 06:55:03 crc kubenswrapper[4861]: E0129 06:55:03.787780 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-c5jkq" podUID="8c6201c8-50a8-4b95-82e0-b944b78348d6"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.796091 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-scripts\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.796176 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-config-data\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.796233 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xt2pn\" (UniqueName: \"kubernetes.io/projected/d48d2111-d309-4775-b728-cdd8b7163ebc-kube-api-access-xt2pn\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.796332 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-fernet-keys\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.796373 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-credential-keys\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.796406 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-combined-ca-bundle\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.847052 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.904668 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-fernet-keys\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.904734 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-credential-keys\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.904764 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-combined-ca-bundle\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.904798 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-scripts\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.904831 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-config-data\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.904870 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xt2pn\" (UniqueName: \"kubernetes.io/projected/d48d2111-d309-4775-b728-cdd8b7163ebc-kube-api-access-xt2pn\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.909864 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-scripts\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.910705 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-fernet-keys\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.920671 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-config-data\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.922739 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-credential-keys\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.923174 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xt2pn\" (UniqueName: \"kubernetes.io/projected/d48d2111-d309-4775-b728-cdd8b7163ebc-kube-api-access-xt2pn\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:03 crc kubenswrapper[4861]: I0129 06:55:03.925654 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-combined-ca-bundle\") pod \"keystone-bootstrap-xqbhr\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " pod="openstack/keystone-bootstrap-xqbhr"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.005692 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5rqd\" (UniqueName: \"kubernetes.io/projected/a88b567a-ded6-44c8-8736-7ac3ccc665f7-kube-api-access-g5rqd\") pod \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") "
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.005784 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-logs\") pod \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") "
"operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-config-data\") pod \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.005861 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.005907 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-combined-ca-bundle\") pod \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.005933 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-httpd-run\") pod \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.005988 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-scripts\") pod \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\" (UID: \"a88b567a-ded6-44c8-8736-7ac3ccc665f7\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.006608 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-logs" (OuterVolumeSpecName: "logs") pod "a88b567a-ded6-44c8-8736-7ac3ccc665f7" (UID: "a88b567a-ded6-44c8-8736-7ac3ccc665f7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.006619 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a88b567a-ded6-44c8-8736-7ac3ccc665f7" (UID: "a88b567a-ded6-44c8-8736-7ac3ccc665f7"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.007535 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.007600 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a88b567a-ded6-44c8-8736-7ac3ccc665f7-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.014559 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-scripts" (OuterVolumeSpecName: "scripts") pod "a88b567a-ded6-44c8-8736-7ac3ccc665f7" (UID: "a88b567a-ded6-44c8-8736-7ac3ccc665f7"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.018495 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a88b567a-ded6-44c8-8736-7ac3ccc665f7-kube-api-access-g5rqd" (OuterVolumeSpecName: "kube-api-access-g5rqd") pod "a88b567a-ded6-44c8-8736-7ac3ccc665f7" (UID: "a88b567a-ded6-44c8-8736-7ac3ccc665f7"). InnerVolumeSpecName "kube-api-access-g5rqd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.022795 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xqbhr" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.017679 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "a88b567a-ded6-44c8-8736-7ac3ccc665f7" (UID: "a88b567a-ded6-44c8-8736-7ac3ccc665f7"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.030981 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a88b567a-ded6-44c8-8736-7ac3ccc665f7" (UID: "a88b567a-ded6-44c8-8736-7ac3ccc665f7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.066388 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-config-data" (OuterVolumeSpecName: "config-data") pod "a88b567a-ded6-44c8-8736-7ac3ccc665f7" (UID: "a88b567a-ded6-44c8-8736-7ac3ccc665f7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.109190 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.109264 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.109278 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.109291 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a88b567a-ded6-44c8-8736-7ac3ccc665f7-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.109300 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5rqd\" (UniqueName: \"kubernetes.io/projected/a88b567a-ded6-44c8-8736-7ac3ccc665f7-kube-api-access-g5rqd\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.128601 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.211480 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: E0129 06:55:04.227014 4861 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:5a548c25fe3d02f7a042cb0a6d28fc8039a34c4a3b3d07aadda4aba3a926e777" Jan 29 06:55:04 crc kubenswrapper[4861]: E0129 06:55:04.227421 4861 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:5a548c25fe3d02f7a042cb0a6d28fc8039a34c4a3b3d07aadda4aba3a926e777,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndch55bhc4h59fhfdh85h59chb5h64chbdh546h54ch665h89h77h5d9h99h64bh55h68h66ch78h586h5dch664h656h659h57h5f7h64dhd9h5bbq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mjjx5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(7de4c2d2-3468-4522-8f5d-5acf0d1c4806): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.238759 4861 scope.go:117] "RemoveContainer" containerID="b18d8a3a28970581f54e1b3d38cdffcfb8298eaa5c39136dd7eefb911db2d039" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.254569 4861 util.go:48] "No ready sandbox for pod can be found. 
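The ErrImagePull above is the first-failure path; a few entries below the same condition shows up for another pod (cinder-db-sync-c5jkq) as ImagePullBackOff, meaning the kubelet is already waiting out an exponentially growing retry interval before pulling again. The 10-second initial delay and 5-minute cap in this sketch are the commonly cited kubelet defaults, assumed here rather than read from this log.

```go
package main

import (
	"fmt"
	"time"
)

// Illustrates the backoff progression behind "Back-off pulling image ...".
// Assumed defaults: 10s initial wait, doubling, capped at 5 minutes.
func main() {
	wait, max := 10*time.Second, 5*time.Minute
	for attempt := 1; attempt <= 7; attempt++ {
		fmt.Printf("pull retry %d after %v\n", attempt, wait)
		wait *= 2
		if wait > max {
			wait = max
		}
	}
}
```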
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.414704 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.415164 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-scripts\") pod \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.415207 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-config-data\") pod \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.415232 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-logs\") pod \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.415302 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzd2x\" (UniqueName: \"kubernetes.io/projected/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-kube-api-access-bzd2x\") pod \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.415324 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-combined-ca-bundle\") pod \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.415404 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-httpd-run\") pod \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\" (UID: \"5b4cf8ff-a136-4ecb-bb04-b44475d3832b\") " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.415840 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-logs" (OuterVolumeSpecName: "logs") pod "5b4cf8ff-a136-4ecb-bb04-b44475d3832b" (UID: "5b4cf8ff-a136-4ecb-bb04-b44475d3832b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.416009 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5b4cf8ff-a136-4ecb-bb04-b44475d3832b" (UID: "5b4cf8ff-a136-4ecb-bb04-b44475d3832b"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.420034 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-kube-api-access-bzd2x" (OuterVolumeSpecName: "kube-api-access-bzd2x") pod "5b4cf8ff-a136-4ecb-bb04-b44475d3832b" (UID: "5b4cf8ff-a136-4ecb-bb04-b44475d3832b"). InnerVolumeSpecName "kube-api-access-bzd2x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.420630 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-scripts" (OuterVolumeSpecName: "scripts") pod "5b4cf8ff-a136-4ecb-bb04-b44475d3832b" (UID: "5b4cf8ff-a136-4ecb-bb04-b44475d3832b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.421969 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "5b4cf8ff-a136-4ecb-bb04-b44475d3832b" (UID: "5b4cf8ff-a136-4ecb-bb04-b44475d3832b"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.444880 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b4cf8ff-a136-4ecb-bb04-b44475d3832b" (UID: "5b4cf8ff-a136-4ecb-bb04-b44475d3832b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.460316 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-config-data" (OuterVolumeSpecName: "config-data") pod "5b4cf8ff-a136-4ecb-bb04-b44475d3832b" (UID: "5b4cf8ff-a136-4ecb-bb04-b44475d3832b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.517115 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.517147 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.517178 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.517190 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.517199 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.517207 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.517216 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzd2x\" (UniqueName: \"kubernetes.io/projected/5b4cf8ff-a136-4ecb-bb04-b44475d3832b-kube-api-access-bzd2x\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.536027 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.619439 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.686729 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a88b567a-ded6-44c8-8736-7ac3ccc665f7","Type":"ContainerDied","Data":"e8dc3752c3691b802ff940938f8100fdd805ec2f92eeefa6859f1b8786f16271"} Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.686787 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.686794 4861 scope.go:117] "RemoveContainer" containerID="752086d556d86b2b95ab0e4ed9524ae139ddbea9ba153750505afc005bc1db9a" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.689047 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x75d6" event={"ID":"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe","Type":"ContainerStarted","Data":"1032eb6199237465104797eee73e2d72aa2b935e6b067c7d59fbd6d04743a636"} Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.708462 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" event={"ID":"fa374fd0-6b03-475d-8230-9fb2a9768091","Type":"ContainerStarted","Data":"afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73"} Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.708645 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.729422 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-x75d6" podStartSLOduration=2.607073429 podStartE2EDuration="27.729393441s" podCreationTimestamp="2026-01-29 06:54:37 +0000 UTC" firstStartedPulling="2026-01-29 06:54:39.143853586 +0000 UTC m=+1170.815348143" lastFinishedPulling="2026-01-29 06:55:04.266173608 +0000 UTC m=+1195.937668155" observedRunningTime="2026-01-29 06:55:04.71095682 +0000 UTC m=+1196.382451387" watchObservedRunningTime="2026-01-29 06:55:04.729393441 +0000 UTC m=+1196.400888018" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.729936 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.731840 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b4cf8ff-a136-4ecb-bb04-b44475d3832b","Type":"ContainerDied","Data":"ad05784e84ea463e29de0dab4109f5eb8d93a4c0cd36639365e1fecda34da32c"} Jan 29 06:55:04 crc kubenswrapper[4861]: E0129 06:55:04.742362 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49\\\"\"" pod="openstack/cinder-db-sync-c5jkq" podUID="8c6201c8-50a8-4b95-82e0-b944b78348d6" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.742451 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-xqbhr"] Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.742393 4861 scope.go:117] "RemoveContainer" containerID="7578441fd0619de92995ab3b49ab32ffad625eeff9a2ad9ff3b70cbfc4b9e7c4" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.747664 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" podStartSLOduration=25.747647367 podStartE2EDuration="25.747647367s" podCreationTimestamp="2026-01-29 06:54:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:04.742952497 +0000 UTC m=+1196.414447114" watchObservedRunningTime="2026-01-29 06:55:04.747647367 +0000 UTC m=+1196.419141924" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.763777 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:55:04 crc kubenswrapper[4861]: W0129 06:55:04.770464 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd48d2111_d309_4775_b728_cdd8b7163ebc.slice/crio-c0b6655b4e2baa7e531d1259f951fb427cffa41ad654a6702f09aa5b4fecd67a WatchSource:0}: Error finding container c0b6655b4e2baa7e531d1259f951fb427cffa41ad654a6702f09aa5b4fecd67a: Status 404 returned error can't find the container with id c0b6655b4e2baa7e531d1259f951fb427cffa41ad654a6702f09aa5b4fecd67a Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.771242 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.802577 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:55:04 crc kubenswrapper[4861]: E0129 06:55:04.803374 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a88b567a-ded6-44c8-8736-7ac3ccc665f7" containerName="glance-log" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.803508 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a88b567a-ded6-44c8-8736-7ac3ccc665f7" containerName="glance-log" Jan 29 06:55:04 crc kubenswrapper[4861]: E0129 06:55:04.803636 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a88b567a-ded6-44c8-8736-7ac3ccc665f7" containerName="glance-httpd" Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.803700 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a88b567a-ded6-44c8-8736-7ac3ccc665f7" containerName="glance-httpd" Jan 
Jan 29 06:55:04 crc kubenswrapper[4861]: E0129 06:55:04.803771 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b4cf8ff-a136-4ecb-bb04-b44475d3832b" containerName="glance-httpd"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.803837 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b4cf8ff-a136-4ecb-bb04-b44475d3832b" containerName="glance-httpd"
Jan 29 06:55:04 crc kubenswrapper[4861]: E0129 06:55:04.803919 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b4cf8ff-a136-4ecb-bb04-b44475d3832b" containerName="glance-log"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.803978 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b4cf8ff-a136-4ecb-bb04-b44475d3832b" containerName="glance-log"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.804275 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a88b567a-ded6-44c8-8736-7ac3ccc665f7" containerName="glance-httpd"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.805491 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b4cf8ff-a136-4ecb-bb04-b44475d3832b" containerName="glance-log"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.805580 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b4cf8ff-a136-4ecb-bb04-b44475d3832b" containerName="glance-httpd"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.805692 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a88b567a-ded6-44c8-8736-7ac3ccc665f7" containerName="glance-log"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.806978 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.813400 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-nnz82"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.813479 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.813599 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.813728 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.826860 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.913546 4861 scope.go:117] "RemoveContainer" containerID="2493fc0811f212bdea381ebe85b017b90079190578fcb6cc79a84dfe2071ef12"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.925329 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.925735 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.925888 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.925914 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-logs\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.926186 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xmsh\" (UniqueName: \"kubernetes.io/projected/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-kube-api-access-8xmsh\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.926214 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.926264 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.926301 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.961221 4861 scope.go:117] "RemoveContainer" containerID="4ed67a4d791aaf2962c06c57850309cf00bcc6e77e6f490c2fc39ff3df92af0c"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.965964 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.975994 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.984190 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.986253 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.991920 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.992207 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Jan 29 06:55:04 crc kubenswrapper[4861]: I0129 06:55:04.999167 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.027882 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.027951 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.027986 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.028012 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.028053 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.028211 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.028233 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-logs\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.028282 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xmsh\" (UniqueName: \"kubernetes.io/projected/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-kube-api-access-8xmsh\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.028328 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.029310 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.029423 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-logs\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.032734 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.039169 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.039258 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.044231 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xmsh\" (UniqueName: \"kubernetes.io/projected/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-kube-api-access-8xmsh\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.057769 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.068755 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.129874 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.129978 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.130007 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mbl7\" (UniqueName: \"kubernetes.io/projected/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-kube-api-access-6mbl7\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.130025 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-logs\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.130062 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-scripts\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.130089 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-config-data\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.130109 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.130139 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.133112 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b4cf8ff-a136-4ecb-bb04-b44475d3832b" path="/var/lib/kubelet/pods/5b4cf8ff-a136-4ecb-bb04-b44475d3832b/volumes"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.133780 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a88b567a-ded6-44c8-8736-7ac3ccc665f7" path="/var/lib/kubelet/pods/a88b567a-ded6-44c8-8736-7ac3ccc665f7/volumes"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.134329 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be608de7-3ef1-4706-9527-d216de787b20" path="/var/lib/kubelet/pods/be608de7-3ef1-4706-9527-d216de787b20/volumes"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.231605 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.231666 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mbl7\" (UniqueName: \"kubernetes.io/projected/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-kube-api-access-6mbl7\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.231696 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-logs\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.231768 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-scripts\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.231789 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-config-data\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.231810 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.231849 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.231878 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.232052 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.232174 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-logs\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.232446 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.234288 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.237059 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-scripts\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.238249 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.238581 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.253344 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-config-data\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.257180 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mbl7\" (UniqueName: \"kubernetes.io/projected/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-kube-api-access-6mbl7\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.272798 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.311462 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.741096 4861 generic.go:334] "Generic (PLEG): container finished" podID="597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74" containerID="ba7f7b8aa0a69ef1edc92b25a04e8ad86840de8d862f70596abc49240736dd9c" exitCode=0
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.741123 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8b6sf" event={"ID":"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74","Type":"ContainerDied","Data":"ba7f7b8aa0a69ef1edc92b25a04e8ad86840de8d862f70596abc49240736dd9c"}
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.743658 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xqbhr" event={"ID":"d48d2111-d309-4775-b728-cdd8b7163ebc","Type":"ContainerStarted","Data":"c3d836d7ff030fca36739ea9a5f282a8be67bbbe4c8a440b3b995a0d0c5dc317"}
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.743693 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xqbhr" event={"ID":"d48d2111-d309-4775-b728-cdd8b7163ebc","Type":"ContainerStarted","Data":"c0b6655b4e2baa7e531d1259f951fb427cffa41ad654a6702f09aa5b4fecd67a"}
Jan 29 06:55:05 crc kubenswrapper[4861]: I0129 06:55:05.787263 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-xqbhr" podStartSLOduration=2.787240961 podStartE2EDuration="2.787240961s" podCreationTimestamp="2026-01-29 06:55:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:05.780252933 +0000 UTC m=+1197.451747500" watchObservedRunningTime="2026-01-29 06:55:05.787240961 +0000 UTC m=+1197.458735548"
Jan 29 06:55:06 crc kubenswrapper[4861]: I0129 06:55:06.018232 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 06:55:06 crc kubenswrapper[4861]: W0129 06:55:06.029018 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad57ea59_d551_4e68_aaf6_7b2ee6ea739b.slice/crio-1a2eeb3b1bac6549a3e7dbd577241a19ccebaf204542e434abdeab2804fa70fc WatchSource:0}: Error finding container 1a2eeb3b1bac6549a3e7dbd577241a19ccebaf204542e434abdeab2804fa70fc: Status 404 returned error can't find the container with id 1a2eeb3b1bac6549a3e7dbd577241a19ccebaf204542e434abdeab2804fa70fc
Jan 29 06:55:06 crc kubenswrapper[4861]: I0129 06:55:06.109541 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 06:55:06 crc kubenswrapper[4861]: W0129 06:55:06.116382 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4bf22b0a_5a8f_4f18_b259_8c43fac759dd.slice/crio-a935ebdeb201738e741fbc76e1cdcf8de98bedc486fd59184ce464857e702ba8 WatchSource:0}: Error finding container a935ebdeb201738e741fbc76e1cdcf8de98bedc486fd59184ce464857e702ba8: Status 404 returned error can't find the container with id a935ebdeb201738e741fbc76e1cdcf8de98bedc486fd59184ce464857e702ba8
Jan 29 06:55:06 crc kubenswrapper[4861]: I0129 06:55:06.775711 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4bf22b0a-5a8f-4f18-b259-8c43fac759dd","Type":"ContainerStarted","Data":"a37bdb21623a422cbac5028522ac71470fcf73cee302efbb609b5d8966142036"}
Jan 29 06:55:06 crc kubenswrapper[4861]: I0129 06:55:06.775953 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4bf22b0a-5a8f-4f18-b259-8c43fac759dd","Type":"ContainerStarted","Data":"a935ebdeb201738e741fbc76e1cdcf8de98bedc486fd59184ce464857e702ba8"}
Jan 29 06:55:06 crc kubenswrapper[4861]: I0129 06:55:06.793275 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7de4c2d2-3468-4522-8f5d-5acf0d1c4806","Type":"ContainerStarted","Data":"755619eeeb3119d470cf51a2e24d614ba425009925bd792e194519f0d1a42aaf"}
Jan 29 06:55:06 crc kubenswrapper[4861]: I0129 06:55:06.799031 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b","Type":"ContainerStarted","Data":"830b8ad78a9cb455d1b976b843a3c03005c610d03cc564242cb5c13c4c9476d0"}
Jan 29 06:55:06 crc kubenswrapper[4861]: I0129 06:55:06.799113 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b","Type":"ContainerStarted","Data":"1a2eeb3b1bac6549a3e7dbd577241a19ccebaf204542e434abdeab2804fa70fc"}
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.450285 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74" (UID: "597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.474220 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.474263 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.474275 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtz68\" (UniqueName: \"kubernetes.io/projected/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74-kube-api-access-jtz68\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.815853 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4bf22b0a-5a8f-4f18-b259-8c43fac759dd","Type":"ContainerStarted","Data":"0b5e0b509d93b270a78efdf4453478a55b8536121a28cd51c081d4a2b3fbd06b"} Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.818343 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-8b6sf" event={"ID":"597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74","Type":"ContainerDied","Data":"3dd6e097122ebfee4673607e08493473c5a4f905de2518d38689bb9f4491821c"} Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.818399 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3dd6e097122ebfee4673607e08493473c5a4f905de2518d38689bb9f4491821c" Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.818355 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-8b6sf" Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.820054 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b","Type":"ContainerStarted","Data":"3ebe082356931eb76e7562873162d36ba549bea59f8d99bc8ac0a4cfd7a0f83a"} Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.822107 4861 generic.go:334] "Generic (PLEG): container finished" podID="e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe" containerID="1032eb6199237465104797eee73e2d72aa2b935e6b067c7d59fbd6d04743a636" exitCode=0 Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.822152 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x75d6" event={"ID":"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe","Type":"ContainerDied","Data":"1032eb6199237465104797eee73e2d72aa2b935e6b067c7d59fbd6d04743a636"} Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.838511 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.838492009 podStartE2EDuration="3.838492009s" podCreationTimestamp="2026-01-29 06:55:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:07.831754267 +0000 UTC m=+1199.503248824" watchObservedRunningTime="2026-01-29 06:55:07.838492009 +0000 UTC m=+1199.509986556" Jan 29 06:55:07 crc kubenswrapper[4861]: I0129 06:55:07.877557 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.877537996 podStartE2EDuration="3.877537996s" podCreationTimestamp="2026-01-29 06:55:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:07.877388982 +0000 UTC m=+1199.548883549" watchObservedRunningTime="2026-01-29 06:55:07.877537996 +0000 UTC m=+1199.549032553" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.007264 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-j6bzt"] Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.010219 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" podUID="fa374fd0-6b03-475d-8230-9fb2a9768091" containerName="dnsmasq-dns" containerID="cri-o://afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73" gracePeriod=10 Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.042046 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-685444497c-2hjbp"] Jan 29 06:55:08 crc kubenswrapper[4861]: E0129 06:55:08.042521 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74" containerName="neutron-db-sync" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.042540 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74" containerName="neutron-db-sync" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.042741 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74" containerName="neutron-db-sync" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.043595 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.081192 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-685444497c-2hjbp"] Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.098877 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-config\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.098916 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-nb\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.098941 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-svc\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.098977 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxqlg\" (UniqueName: \"kubernetes.io/projected/ec1f6ee9-55ac-465f-92c3-0b08506db348-kube-api-access-jxqlg\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.098998 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-sb\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.099048 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-swift-storage-0\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.119775 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-96c87b9b6-8r8w7"] Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.121063 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.139503 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-p72xd" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.139624 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.139667 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.139694 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.173363 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-96c87b9b6-8r8w7"] Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.200830 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-ovndb-tls-certs\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.200885 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-swift-storage-0\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.200915 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q9ft\" (UniqueName: \"kubernetes.io/projected/e3f66a07-77f8-476d-80bf-f6cc152cfd17-kube-api-access-4q9ft\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.201046 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-config\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.201096 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-nb\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.201158 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-svc\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.201213 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-config\") pod \"neutron-96c87b9b6-8r8w7\" (UID: 
\"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.201250 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxqlg\" (UniqueName: \"kubernetes.io/projected/ec1f6ee9-55ac-465f-92c3-0b08506db348-kube-api-access-jxqlg\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.201281 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-sb\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.201343 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-httpd-config\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.201379 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-combined-ca-bundle\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.202490 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-swift-storage-0\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.203135 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-svc\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.203251 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-sb\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.203758 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-config\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.204566 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-nb\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" 
Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.225680 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxqlg\" (UniqueName: \"kubernetes.io/projected/ec1f6ee9-55ac-465f-92c3-0b08506db348-kube-api-access-jxqlg\") pod \"dnsmasq-dns-685444497c-2hjbp\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.302959 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-httpd-config\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.303009 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-combined-ca-bundle\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.303107 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-ovndb-tls-certs\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.303134 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q9ft\" (UniqueName: \"kubernetes.io/projected/e3f66a07-77f8-476d-80bf-f6cc152cfd17-kube-api-access-4q9ft\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.303247 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-config\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.308385 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-config\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.309177 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-combined-ca-bundle\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.312977 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-httpd-config\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.321535 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-ovndb-tls-certs\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.323670 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q9ft\" (UniqueName: \"kubernetes.io/projected/e3f66a07-77f8-476d-80bf-f6cc152cfd17-kube-api-access-4q9ft\") pod \"neutron-96c87b9b6-8r8w7\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.400008 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.524796 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.679233 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.810150 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-svc\") pod \"fa374fd0-6b03-475d-8230-9fb2a9768091\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.810204 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-nb\") pod \"fa374fd0-6b03-475d-8230-9fb2a9768091\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.810229 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlf84\" (UniqueName: \"kubernetes.io/projected/fa374fd0-6b03-475d-8230-9fb2a9768091-kube-api-access-hlf84\") pod \"fa374fd0-6b03-475d-8230-9fb2a9768091\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.810301 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-swift-storage-0\") pod \"fa374fd0-6b03-475d-8230-9fb2a9768091\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.810936 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-config\") pod \"fa374fd0-6b03-475d-8230-9fb2a9768091\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.810994 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-sb\") pod \"fa374fd0-6b03-475d-8230-9fb2a9768091\" (UID: \"fa374fd0-6b03-475d-8230-9fb2a9768091\") " Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.835246 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa374fd0-6b03-475d-8230-9fb2a9768091-kube-api-access-hlf84" (OuterVolumeSpecName: "kube-api-access-hlf84") pod 
"fa374fd0-6b03-475d-8230-9fb2a9768091" (UID: "fa374fd0-6b03-475d-8230-9fb2a9768091"). InnerVolumeSpecName "kube-api-access-hlf84". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.836240 4861 generic.go:334] "Generic (PLEG): container finished" podID="d48d2111-d309-4775-b728-cdd8b7163ebc" containerID="c3d836d7ff030fca36739ea9a5f282a8be67bbbe4c8a440b3b995a0d0c5dc317" exitCode=0 Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.836315 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xqbhr" event={"ID":"d48d2111-d309-4775-b728-cdd8b7163ebc","Type":"ContainerDied","Data":"c3d836d7ff030fca36739ea9a5f282a8be67bbbe4c8a440b3b995a0d0c5dc317"} Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.843043 4861 generic.go:334] "Generic (PLEG): container finished" podID="fa374fd0-6b03-475d-8230-9fb2a9768091" containerID="afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73" exitCode=0 Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.843465 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" event={"ID":"fa374fd0-6b03-475d-8230-9fb2a9768091","Type":"ContainerDied","Data":"afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73"} Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.843504 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" event={"ID":"fa374fd0-6b03-475d-8230-9fb2a9768091","Type":"ContainerDied","Data":"ddf002d2b21cac7beb20b97fe76c1d0bda80dbc4238c13fb2244922405a64059"} Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.843522 4861 scope.go:117] "RemoveContainer" containerID="afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.843638 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f6f8cb849-j6bzt" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.890097 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fa374fd0-6b03-475d-8230-9fb2a9768091" (UID: "fa374fd0-6b03-475d-8230-9fb2a9768091"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.909006 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fa374fd0-6b03-475d-8230-9fb2a9768091" (UID: "fa374fd0-6b03-475d-8230-9fb2a9768091"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.912900 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.912928 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.912938 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlf84\" (UniqueName: \"kubernetes.io/projected/fa374fd0-6b03-475d-8230-9fb2a9768091-kube-api-access-hlf84\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.919235 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "fa374fd0-6b03-475d-8230-9fb2a9768091" (UID: "fa374fd0-6b03-475d-8230-9fb2a9768091"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.948993 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fa374fd0-6b03-475d-8230-9fb2a9768091" (UID: "fa374fd0-6b03-475d-8230-9fb2a9768091"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.954873 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-config" (OuterVolumeSpecName: "config") pod "fa374fd0-6b03-475d-8230-9fb2a9768091" (UID: "fa374fd0-6b03-475d-8230-9fb2a9768091"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:08 crc kubenswrapper[4861]: I0129 06:55:08.996310 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-685444497c-2hjbp"] Jan 29 06:55:09 crc kubenswrapper[4861]: W0129 06:55:09.009802 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec1f6ee9_55ac_465f_92c3_0b08506db348.slice/crio-32fce20a353117369c76b7d4fc4bc1430d85c90e11267785f9cd032e4a1d18d8 WatchSource:0}: Error finding container 32fce20a353117369c76b7d4fc4bc1430d85c90e11267785f9cd032e4a1d18d8: Status 404 returned error can't find the container with id 32fce20a353117369c76b7d4fc4bc1430d85c90e11267785f9cd032e4a1d18d8 Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.014597 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.014615 4861 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.014626 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa374fd0-6b03-475d-8230-9fb2a9768091-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.040388 4861 scope.go:117] "RemoveContainer" containerID="c722e0ac492dfce7d20f478396846ea28ca925b44be298b2806b3de4b106af32" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.091509 4861 scope.go:117] "RemoveContainer" containerID="afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73" Jan 29 06:55:09 crc kubenswrapper[4861]: E0129 06:55:09.091912 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73\": container with ID starting with afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73 not found: ID does not exist" containerID="afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.091959 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73"} err="failed to get container status \"afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73\": rpc error: code = NotFound desc = could not find container \"afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73\": container with ID starting with afcbd675770c42406dc831d8a35732060b2a81b2c778b3376ec163b892c96b73 not found: ID does not exist" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.091986 4861 scope.go:117] "RemoveContainer" containerID="c722e0ac492dfce7d20f478396846ea28ca925b44be298b2806b3de4b106af32" Jan 29 06:55:09 crc kubenswrapper[4861]: E0129 06:55:09.092537 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c722e0ac492dfce7d20f478396846ea28ca925b44be298b2806b3de4b106af32\": container with ID starting with c722e0ac492dfce7d20f478396846ea28ca925b44be298b2806b3de4b106af32 not found: ID does not exist" 
containerID="c722e0ac492dfce7d20f478396846ea28ca925b44be298b2806b3de4b106af32" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.092568 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c722e0ac492dfce7d20f478396846ea28ca925b44be298b2806b3de4b106af32"} err="failed to get container status \"c722e0ac492dfce7d20f478396846ea28ca925b44be298b2806b3de4b106af32\": rpc error: code = NotFound desc = could not find container \"c722e0ac492dfce7d20f478396846ea28ca925b44be298b2806b3de4b106af32\": container with ID starting with c722e0ac492dfce7d20f478396846ea28ca925b44be298b2806b3de4b106af32 not found: ID does not exist" Jan 29 06:55:09 crc kubenswrapper[4861]: E0129 06:55:09.134993 4861 info.go:109] Failed to get network devices: open /sys/class/net/c9348043bb67229/address: no such file or directory Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.190957 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-j6bzt"] Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.191924 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-j6bzt"] Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.240436 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-96c87b9b6-8r8w7"] Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.274865 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x75d6" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.330065 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-combined-ca-bundle\") pod \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.330171 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrljs\" (UniqueName: \"kubernetes.io/projected/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-kube-api-access-jrljs\") pod \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.330295 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-db-sync-config-data\") pod \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\" (UID: \"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe\") " Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.337299 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-kube-api-access-jrljs" (OuterVolumeSpecName: "kube-api-access-jrljs") pod "e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe" (UID: "e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe"). InnerVolumeSpecName "kube-api-access-jrljs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.341095 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe" (UID: "e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.358656 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe" (UID: "e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.432884 4861 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.432920 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.432929 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrljs\" (UniqueName: \"kubernetes.io/projected/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe-kube-api-access-jrljs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.862592 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x75d6" event={"ID":"e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe","Type":"ContainerDied","Data":"c9348043bb67229cd9e7813a5bcaeed6d1baa8f1c993fb695c302adf02b83012"} Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.862871 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9348043bb67229cd9e7813a5bcaeed6d1baa8f1c993fb695c302adf02b83012" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.862617 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-x75d6" Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.871538 4861 generic.go:334] "Generic (PLEG): container finished" podID="ec1f6ee9-55ac-465f-92c3-0b08506db348" containerID="e698b70f756cbfc47a2c0beeee49408a99d313af7c3f7bef26b5f47a23aaa2f4" exitCode=0 Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.871636 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-685444497c-2hjbp" event={"ID":"ec1f6ee9-55ac-465f-92c3-0b08506db348","Type":"ContainerDied","Data":"e698b70f756cbfc47a2c0beeee49408a99d313af7c3f7bef26b5f47a23aaa2f4"} Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.871662 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-685444497c-2hjbp" event={"ID":"ec1f6ee9-55ac-465f-92c3-0b08506db348","Type":"ContainerStarted","Data":"32fce20a353117369c76b7d4fc4bc1430d85c90e11267785f9cd032e4a1d18d8"} Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.884575 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-96c87b9b6-8r8w7" event={"ID":"e3f66a07-77f8-476d-80bf-f6cc152cfd17","Type":"ContainerStarted","Data":"48d76bc12530564c4b2eebc89cc5dfe44093d2a0f960d299491520d1b1bce1ec"} Jan 29 06:55:09 crc kubenswrapper[4861]: I0129 06:55:09.884623 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-96c87b9b6-8r8w7" event={"ID":"e3f66a07-77f8-476d-80bf-f6cc152cfd17","Type":"ContainerStarted","Data":"818682027ce0ed5a7b7fae2e9118020dfbfc79320f2727273da6a82672e866f5"} Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.165534 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-75945d9bdb-6szss"] Jan 29 06:55:10 crc kubenswrapper[4861]: E0129 06:55:10.167422 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa374fd0-6b03-475d-8230-9fb2a9768091" containerName="init" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.167464 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa374fd0-6b03-475d-8230-9fb2a9768091" containerName="init" Jan 29 06:55:10 crc kubenswrapper[4861]: E0129 06:55:10.167481 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa374fd0-6b03-475d-8230-9fb2a9768091" containerName="dnsmasq-dns" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.167489 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa374fd0-6b03-475d-8230-9fb2a9768091" containerName="dnsmasq-dns" Jan 29 06:55:10 crc kubenswrapper[4861]: E0129 06:55:10.167510 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe" containerName="barbican-db-sync" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.167517 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe" containerName="barbican-db-sync" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.167741 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa374fd0-6b03-475d-8230-9fb2a9768091" containerName="dnsmasq-dns" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.167763 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe" containerName="barbican-db-sync" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.168642 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.171467 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-brpvh" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.172599 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.172777 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.185265 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-54cf866f89-b4nzm"] Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.186756 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.191020 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.202053 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-75945d9bdb-6szss"] Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.220480 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-54cf866f89-b4nzm"] Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.248360 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data-custom\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.248445 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcvx6\" (UniqueName: \"kubernetes.io/projected/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-kube-api-access-lcvx6\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.248476 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.248508 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htr4x\" (UniqueName: \"kubernetes.io/projected/a71f5c10-5dfd-4620-a5a9-e44593f90221-kube-api-access-htr4x\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.248543 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-logs\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: 
\"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.248567 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-combined-ca-bundle\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.248587 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data-custom\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.248620 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.248654 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-combined-ca-bundle\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.248688 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a71f5c10-5dfd-4620-a5a9-e44593f90221-logs\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.297273 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-685444497c-2hjbp"] Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.316853 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-98k9x"] Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.325681 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.347442 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-98k9x"] Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350112 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-combined-ca-bundle\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350150 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data-custom\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350180 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-config\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350207 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350223 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-sb\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350269 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-combined-ca-bundle\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350303 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a71f5c10-5dfd-4620-a5a9-e44593f90221-logs\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350324 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-svc\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350358 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dsqq\" (UniqueName: \"kubernetes.io/projected/946bf576-dbb6-4284-9794-8330f6213430-kube-api-access-9dsqq\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350378 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-swift-storage-0\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350422 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data-custom\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350470 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcvx6\" (UniqueName: \"kubernetes.io/projected/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-kube-api-access-lcvx6\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350494 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350518 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htr4x\" (UniqueName: \"kubernetes.io/projected/a71f5c10-5dfd-4620-a5a9-e44593f90221-kube-api-access-htr4x\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350541 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-nb\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350566 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-logs\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.350971 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-logs\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " 
pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.358571 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-combined-ca-bundle\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.359776 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data-custom\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.359979 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a71f5c10-5dfd-4620-a5a9-e44593f90221-logs\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.376561 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data-custom\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.388538 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.390694 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-combined-ca-bundle\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.398066 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcvx6\" (UniqueName: \"kubernetes.io/projected/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-kube-api-access-lcvx6\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.398621 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htr4x\" (UniqueName: \"kubernetes.io/projected/a71f5c10-5dfd-4620-a5a9-e44593f90221-kube-api-access-htr4x\") pod \"barbican-worker-54cf866f89-b4nzm\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.398809 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data\") pod \"barbican-keystone-listener-75945d9bdb-6szss\" (UID: 
\"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.434665 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-d9d5fc54d-kq4c4"] Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.436207 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.448080 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.451559 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-svc\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.451613 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dsqq\" (UniqueName: \"kubernetes.io/projected/946bf576-dbb6-4284-9794-8330f6213430-kube-api-access-9dsqq\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.451633 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-swift-storage-0\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.451705 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-nb\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.451741 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-config\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.452316 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-sb\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.453054 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-sb\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.459897 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-svc\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.461367 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-nb\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.462512 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-swift-storage-0\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.468057 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-config\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.471612 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-d9d5fc54d-kq4c4"] Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.489759 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dsqq\" (UniqueName: \"kubernetes.io/projected/946bf576-dbb6-4284-9794-8330f6213430-kube-api-access-9dsqq\") pod \"dnsmasq-dns-66cdd4b5b5-98k9x\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.534579 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.539902 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.553699 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data-custom\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.553813 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfxcn\" (UniqueName: \"kubernetes.io/projected/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-kube-api-access-tfxcn\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.553838 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-combined-ca-bundle\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.553884 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.553901 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-logs\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.644748 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.654755 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfxcn\" (UniqueName: \"kubernetes.io/projected/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-kube-api-access-tfxcn\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.654805 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-combined-ca-bundle\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.654862 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.654883 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-logs\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.654932 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data-custom\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.656054 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-logs\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.660681 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-combined-ca-bundle\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.660927 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data-custom\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.663019 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc 
kubenswrapper[4861]: I0129 06:55:10.672675 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfxcn\" (UniqueName: \"kubernetes.io/projected/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-kube-api-access-tfxcn\") pod \"barbican-api-d9d5fc54d-kq4c4\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:10 crc kubenswrapper[4861]: I0129 06:55:10.784017 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.137538 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa374fd0-6b03-475d-8230-9fb2a9768091" path="/var/lib/kubelet/pods/fa374fd0-6b03-475d-8230-9fb2a9768091/volumes" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.280286 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6896cf67f5-ztfst"] Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.281855 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.303317 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.303556 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.314380 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6896cf67f5-ztfst"] Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.358816 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-combined-ca-bundle\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.358851 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-ovndb-tls-certs\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.358884 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zs64\" (UniqueName: \"kubernetes.io/projected/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-kube-api-access-8zs64\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.358899 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-internal-tls-certs\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.358949 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-httpd-config\") pod 
\"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.358979 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-public-tls-certs\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.359001 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-config\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.460930 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-httpd-config\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.460991 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-public-tls-certs\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.461016 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-config\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.461107 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-combined-ca-bundle\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.461128 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-ovndb-tls-certs\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.461285 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zs64\" (UniqueName: \"kubernetes.io/projected/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-kube-api-access-8zs64\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.461309 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-internal-tls-certs\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " 
pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.469267 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-combined-ca-bundle\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.469594 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-config\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.469713 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-internal-tls-certs\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.470200 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-public-tls-certs\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.474151 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-httpd-config\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.492619 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-ovndb-tls-certs\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.494213 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zs64\" (UniqueName: \"kubernetes.io/projected/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-kube-api-access-8zs64\") pod \"neutron-6896cf67f5-ztfst\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:11 crc kubenswrapper[4861]: I0129 06:55:11.650535 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.163794 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-xqbhr" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.172524 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-credential-keys\") pod \"d48d2111-d309-4775-b728-cdd8b7163ebc\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.172602 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-config-data\") pod \"d48d2111-d309-4775-b728-cdd8b7163ebc\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.172664 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-fernet-keys\") pod \"d48d2111-d309-4775-b728-cdd8b7163ebc\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.172736 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-combined-ca-bundle\") pod \"d48d2111-d309-4775-b728-cdd8b7163ebc\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.172765 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-scripts\") pod \"d48d2111-d309-4775-b728-cdd8b7163ebc\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.172792 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xt2pn\" (UniqueName: \"kubernetes.io/projected/d48d2111-d309-4775-b728-cdd8b7163ebc-kube-api-access-xt2pn\") pod \"d48d2111-d309-4775-b728-cdd8b7163ebc\" (UID: \"d48d2111-d309-4775-b728-cdd8b7163ebc\") " Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.177931 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "d48d2111-d309-4775-b728-cdd8b7163ebc" (UID: "d48d2111-d309-4775-b728-cdd8b7163ebc"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.178150 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d48d2111-d309-4775-b728-cdd8b7163ebc-kube-api-access-xt2pn" (OuterVolumeSpecName: "kube-api-access-xt2pn") pod "d48d2111-d309-4775-b728-cdd8b7163ebc" (UID: "d48d2111-d309-4775-b728-cdd8b7163ebc"). InnerVolumeSpecName "kube-api-access-xt2pn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.178896 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-scripts" (OuterVolumeSpecName: "scripts") pod "d48d2111-d309-4775-b728-cdd8b7163ebc" (UID: "d48d2111-d309-4775-b728-cdd8b7163ebc"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.195251 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d48d2111-d309-4775-b728-cdd8b7163ebc" (UID: "d48d2111-d309-4775-b728-cdd8b7163ebc"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.213019 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d48d2111-d309-4775-b728-cdd8b7163ebc" (UID: "d48d2111-d309-4775-b728-cdd8b7163ebc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.228825 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-config-data" (OuterVolumeSpecName: "config-data") pod "d48d2111-d309-4775-b728-cdd8b7163ebc" (UID: "d48d2111-d309-4775-b728-cdd8b7163ebc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.274957 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.274990 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.274999 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xt2pn\" (UniqueName: \"kubernetes.io/projected/d48d2111-d309-4775-b728-cdd8b7163ebc-kube-api-access-xt2pn\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.275009 4861 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.275018 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.275029 4861 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d48d2111-d309-4775-b728-cdd8b7163ebc-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.908322 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xqbhr" event={"ID":"d48d2111-d309-4775-b728-cdd8b7163ebc","Type":"ContainerDied","Data":"c0b6655b4e2baa7e531d1259f951fb427cffa41ad654a6702f09aa5b4fecd67a"} Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.908360 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0b6655b4e2baa7e531d1259f951fb427cffa41ad654a6702f09aa5b4fecd67a" Jan 29 06:55:12 crc kubenswrapper[4861]: I0129 06:55:12.908400 4861 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xqbhr" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.312599 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5b7fd56548-f8c7z"] Jan 29 06:55:13 crc kubenswrapper[4861]: E0129 06:55:13.312945 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d48d2111-d309-4775-b728-cdd8b7163ebc" containerName="keystone-bootstrap" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.312956 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d48d2111-d309-4775-b728-cdd8b7163ebc" containerName="keystone-bootstrap" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.315415 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d48d2111-d309-4775-b728-cdd8b7163ebc" containerName="keystone-bootstrap" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.316015 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.318744 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.319025 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.319151 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xp8pr" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.319244 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.319350 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.319448 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.324149 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5b7fd56548-f8c7z"] Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.413098 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-config-data\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.413419 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-public-tls-certs\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.413479 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-internal-tls-certs\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.413537 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-fernet-keys\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.413563 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-combined-ca-bundle\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.413595 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8q9w\" (UniqueName: \"kubernetes.io/projected/27188e95-6192-4569-b254-c1e2d9b28086-kube-api-access-z8q9w\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.413620 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-credential-keys\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.413672 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-scripts\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.515365 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8q9w\" (UniqueName: \"kubernetes.io/projected/27188e95-6192-4569-b254-c1e2d9b28086-kube-api-access-z8q9w\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.515410 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-credential-keys\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.515755 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-scripts\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.515818 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-config-data\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.515838 4861 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-public-tls-certs\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.515875 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-internal-tls-certs\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.515920 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-fernet-keys\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.515942 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-combined-ca-bundle\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.520761 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-internal-tls-certs\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.520989 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-credential-keys\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.521379 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-fernet-keys\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.521848 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-config-data\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.523711 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-combined-ca-bundle\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.525502 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-public-tls-certs\") pod 
\"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.525635 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-scripts\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.536281 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8q9w\" (UniqueName: \"kubernetes.io/projected/27188e95-6192-4569-b254-c1e2d9b28086-kube-api-access-z8q9w\") pod \"keystone-5b7fd56548-f8c7z\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") " pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:13 crc kubenswrapper[4861]: I0129 06:55:13.644460 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.097997 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-c8ddb67f8-pqqd9"] Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.100475 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.112117 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6b8d96575c-7zzfv"] Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.117343 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.170975 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-c8ddb67f8-pqqd9"] Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.198851 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6b8d96575c-7zzfv"] Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.234359 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-combined-ca-bundle\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.234441 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data-custom\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.234490 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data-custom\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.234520 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.234560 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9ed061-0329-42e0-8cca-e7b560c7a19c-logs\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.234580 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.234606 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-logs\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.234642 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-combined-ca-bundle\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.234660 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhn6r\" (UniqueName: \"kubernetes.io/projected/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-kube-api-access-dhn6r\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.234679 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gclbw\" (UniqueName: \"kubernetes.io/projected/bd9ed061-0329-42e0-8cca-e7b560c7a19c-kube-api-access-gclbw\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.260065 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5977db5ddd-9kzf9"] Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.273109 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366015 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-combined-ca-bundle\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366122 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366149 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c10aa65c-076c-4150-a573-d945be1b9c58-logs\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366175 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data-custom\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366204 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data-custom\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366230 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366252 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-combined-ca-bundle\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366278 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9ed061-0329-42e0-8cca-e7b560c7a19c-logs\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366297 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data-custom\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366313 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366333 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-logs\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366363 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-combined-ca-bundle\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366382 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhn6r\" (UniqueName: \"kubernetes.io/projected/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-kube-api-access-dhn6r\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366402 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dbpd\" (UniqueName: \"kubernetes.io/projected/c10aa65c-076c-4150-a573-d945be1b9c58-kube-api-access-7dbpd\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.366419 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gclbw\" (UniqueName: \"kubernetes.io/projected/bd9ed061-0329-42e0-8cca-e7b560c7a19c-kube-api-access-gclbw\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.367123 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9ed061-0329-42e0-8cca-e7b560c7a19c-logs\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.370342 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-logs\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.384661 4861 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data-custom\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.385547 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data-custom\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.386504 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-combined-ca-bundle\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.389542 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.390163 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-combined-ca-bundle\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.390276 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gclbw\" (UniqueName: \"kubernetes.io/projected/bd9ed061-0329-42e0-8cca-e7b560c7a19c-kube-api-access-gclbw\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.413580 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhn6r\" (UniqueName: \"kubernetes.io/projected/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-kube-api-access-dhn6r\") pod \"barbican-worker-6b8d96575c-7zzfv\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.419089 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data\") pod \"barbican-keystone-listener-c8ddb67f8-pqqd9\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.420759 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5977db5ddd-9kzf9"] Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.453487 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.469228 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dbpd\" (UniqueName: \"kubernetes.io/projected/c10aa65c-076c-4150-a573-d945be1b9c58-kube-api-access-7dbpd\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.469809 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.470299 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c10aa65c-076c-4150-a573-d945be1b9c58-logs\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.470406 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-combined-ca-bundle\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.470472 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data-custom\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.471188 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c10aa65c-076c-4150-a573-d945be1b9c58-logs\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.474495 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data-custom\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.476638 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.480591 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-combined-ca-bundle\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 
06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.491292 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.496525 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dbpd\" (UniqueName: \"kubernetes.io/projected/c10aa65c-076c-4150-a573-d945be1b9c58-kube-api-access-7dbpd\") pod \"barbican-api-5977db5ddd-9kzf9\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.608547 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-75945d9bdb-6szss"] Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.612945 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:14 crc kubenswrapper[4861]: W0129 06:55:14.621880 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3e93bc3_461f_4bc0_ad61_f3c0d745f328.slice/crio-787f206c66905f39da63ffec71278974810ed6568d0de1b73df1bd82eb90638b WatchSource:0}: Error finding container 787f206c66905f39da63ffec71278974810ed6568d0de1b73df1bd82eb90638b: Status 404 returned error can't find the container with id 787f206c66905f39da63ffec71278974810ed6568d0de1b73df1bd82eb90638b Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.774243 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-d9d5fc54d-kq4c4"] Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.790548 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-54cf866f89-b4nzm"] Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.865368 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-d8b667488-v7lmh"] Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.866720 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.887034 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.887743 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.909549 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-d8b667488-v7lmh"] Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.954826 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-54cf866f89-b4nzm" event={"ID":"a71f5c10-5dfd-4620-a5a9-e44593f90221","Type":"ContainerStarted","Data":"c941265dddd263181596b8aa10a8cbb145a206fc78ba2d11e018dbdc52bb0091"} Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.969382 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" event={"ID":"e3e93bc3-461f-4bc0-ad61-f3c0d745f328","Type":"ContainerStarted","Data":"787f206c66905f39da63ffec71278974810ed6568d0de1b73df1bd82eb90638b"} Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.972775 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-685444497c-2hjbp" event={"ID":"ec1f6ee9-55ac-465f-92c3-0b08506db348","Type":"ContainerStarted","Data":"01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b"} Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.972936 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-685444497c-2hjbp" podUID="ec1f6ee9-55ac-465f-92c3-0b08506db348" containerName="dnsmasq-dns" containerID="cri-o://01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b" gracePeriod=10 Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.973458 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.985738 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-96c87b9b6-8r8w7" event={"ID":"e3f66a07-77f8-476d-80bf-f6cc152cfd17","Type":"ContainerStarted","Data":"484505ca42014af98f6cbd0a6ac9cd3f6acfa09af05357b188ddd191011787db"} Jan 29 06:55:14 crc kubenswrapper[4861]: I0129 06:55:14.987484 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.007367 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zxj2\" (UniqueName: \"kubernetes.io/projected/947c222c-8f0c-423f-84e8-75a4b9322829-kube-api-access-4zxj2\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.007453 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-public-tls-certs\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.007496 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-internal-tls-certs\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.007542 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/947c222c-8f0c-423f-84e8-75a4b9322829-logs\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.007569 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-combined-ca-bundle\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.007599 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data-custom\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.007626 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.015829 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-685444497c-2hjbp" podStartSLOduration=7.015806225 podStartE2EDuration="7.015806225s" podCreationTimestamp="2026-01-29 06:55:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:15.007011301 +0000 UTC m=+1206.678505878" watchObservedRunningTime="2026-01-29 06:55:15.015806225 +0000 UTC m=+1206.687300782" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.056495 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-98k9x"] Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.074779 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-96c87b9b6-8r8w7" podStartSLOduration=7.074764051 podStartE2EDuration="7.074764051s" podCreationTimestamp="2026-01-29 06:55:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:15.040503776 +0000 UTC m=+1206.711998333" watchObservedRunningTime="2026-01-29 06:55:15.074764051 +0000 UTC m=+1206.746258608" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.109765 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zxj2\" (UniqueName: \"kubernetes.io/projected/947c222c-8f0c-423f-84e8-75a4b9322829-kube-api-access-4zxj2\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " 
pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.109853 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-public-tls-certs\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.109929 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-internal-tls-certs\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.110001 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/947c222c-8f0c-423f-84e8-75a4b9322829-logs\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.110026 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-combined-ca-bundle\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.110081 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data-custom\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.110103 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.111772 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/947c222c-8f0c-423f-84e8-75a4b9322829-logs\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.116366 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-internal-tls-certs\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.119770 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.123876 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-public-tls-certs\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.136086 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zxj2\" (UniqueName: \"kubernetes.io/projected/947c222c-8f0c-423f-84e8-75a4b9322829-kube-api-access-4zxj2\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.143193 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data-custom\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.153445 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-combined-ca-bundle\") pod \"barbican-api-d8b667488-v7lmh\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.157732 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5b7fd56548-f8c7z"] Jan 29 06:55:15 crc kubenswrapper[4861]: W0129 06:55:15.193310 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27188e95_6192_4569_b254_c1e2d9b28086.slice/crio-5284c41c7efbbae3ed2c2474677c06318dba9f788a1f7ce53235f393ed83b41e WatchSource:0}: Error finding container 5284c41c7efbbae3ed2c2474677c06318dba9f788a1f7ce53235f393ed83b41e: Status 404 returned error can't find the container with id 5284c41c7efbbae3ed2c2474677c06318dba9f788a1f7ce53235f393ed83b41e Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.232453 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.232869 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.233541 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.296782 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.315276 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.315643 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.343345 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-d9d5fc54d-kq4c4"] Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.345588 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.358268 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-c8ddb67f8-pqqd9"] Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.399742 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.431665 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6896cf67f5-ztfst"] Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.467710 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6b8d96575c-7zzfv"] Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.473956 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 06:55:15 crc kubenswrapper[4861]: W0129 06:55:15.536576 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb670e77a_b666_4fe3_bc2f_8ceb13c819a0.slice/crio-eca500d9532c68f4405781394577ef6fbfd5ae83758e87b3864a35868b7052fb WatchSource:0}: Error finding container eca500d9532c68f4405781394577ef6fbfd5ae83758e87b3864a35868b7052fb: Status 404 returned error can't find the container with id eca500d9532c68f4405781394577ef6fbfd5ae83758e87b3864a35868b7052fb Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.543528 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5977db5ddd-9kzf9"] Jan 29 06:55:15 crc kubenswrapper[4861]: W0129 06:55:15.556952 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09dd2891_14bd_4b67_a7d8_26d74fcaa6a3.slice/crio-f80d75578861477ec8aac21ea801e295ac62635998a5c081df5e90b95cdd1e45 WatchSource:0}: Error finding container f80d75578861477ec8aac21ea801e295ac62635998a5c081df5e90b95cdd1e45: Status 404 returned error can't find the container with id f80d75578861477ec8aac21ea801e295ac62635998a5c081df5e90b95cdd1e45 Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.838835 4861 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-685444497c-2hjbp" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.933400 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-svc\") pod \"ec1f6ee9-55ac-465f-92c3-0b08506db348\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.933523 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-sb\") pod \"ec1f6ee9-55ac-465f-92c3-0b08506db348\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.933547 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-nb\") pod \"ec1f6ee9-55ac-465f-92c3-0b08506db348\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.933587 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxqlg\" (UniqueName: \"kubernetes.io/projected/ec1f6ee9-55ac-465f-92c3-0b08506db348-kube-api-access-jxqlg\") pod \"ec1f6ee9-55ac-465f-92c3-0b08506db348\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.933693 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-config\") pod \"ec1f6ee9-55ac-465f-92c3-0b08506db348\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.933791 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-swift-storage-0\") pod \"ec1f6ee9-55ac-465f-92c3-0b08506db348\" (UID: \"ec1f6ee9-55ac-465f-92c3-0b08506db348\") " Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.959687 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec1f6ee9-55ac-465f-92c3-0b08506db348-kube-api-access-jxqlg" (OuterVolumeSpecName: "kube-api-access-jxqlg") pod "ec1f6ee9-55ac-465f-92c3-0b08506db348" (UID: "ec1f6ee9-55ac-465f-92c3-0b08506db348"). InnerVolumeSpecName "kube-api-access-jxqlg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:15 crc kubenswrapper[4861]: I0129 06:55:15.963512 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-d8b667488-v7lmh"] Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.013973 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7de4c2d2-3468-4522-8f5d-5acf0d1c4806","Type":"ContainerStarted","Data":"adf5a98d31ec7921da169da60ea413217b543186b9c30f6c3c4531f922be78c6"} Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.016390 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d9d5fc54d-kq4c4" event={"ID":"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3","Type":"ContainerStarted","Data":"8b8373531f81b9135e02abdc9412d8af465a5956774f5229be64485a0583adb7"} Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.018415 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-g89td" event={"ID":"d200c5c2-f7a9-4db9-b65c-18658065131d","Type":"ContainerStarted","Data":"048dbf5066137bb4e571a0ddf6f23cd5da6fb05d04084cbec4c293105909f316"} Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.020482 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d8b667488-v7lmh" event={"ID":"947c222c-8f0c-423f-84e8-75a4b9322829","Type":"ContainerStarted","Data":"639edb4f6a5c377e539d7cca9bcaab3216019cbcb5b738ab3ab9a41b56be3aef"} Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.021412 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6b8d96575c-7zzfv" event={"ID":"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3","Type":"ContainerStarted","Data":"f80d75578861477ec8aac21ea801e295ac62635998a5c081df5e90b95cdd1e45"} Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.023126 4861 generic.go:334] "Generic (PLEG): container finished" podID="ec1f6ee9-55ac-465f-92c3-0b08506db348" containerID="01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b" exitCode=0 Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.023165 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-685444497c-2hjbp" event={"ID":"ec1f6ee9-55ac-465f-92c3-0b08506db348","Type":"ContainerDied","Data":"01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b"} Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.023180 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-685444497c-2hjbp" event={"ID":"ec1f6ee9-55ac-465f-92c3-0b08506db348","Type":"ContainerDied","Data":"32fce20a353117369c76b7d4fc4bc1430d85c90e11267785f9cd032e4a1d18d8"} Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.023196 4861 scope.go:117] "RemoveContainer" containerID="01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b" Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.023286 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-685444497c-2hjbp"
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.025032 4861 generic.go:334] "Generic (PLEG): container finished" podID="946bf576-dbb6-4284-9794-8330f6213430" containerID="96a5f8f30b42d1d9a073bc7865d209c3c2028cf79d35e54a60c32d5021bed97e" exitCode=0
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.025072 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" event={"ID":"946bf576-dbb6-4284-9794-8330f6213430","Type":"ContainerDied","Data":"96a5f8f30b42d1d9a073bc7865d209c3c2028cf79d35e54a60c32d5021bed97e"}
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.025087 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" event={"ID":"946bf576-dbb6-4284-9794-8330f6213430","Type":"ContainerStarted","Data":"fc391acc8982161c95293203473109aa3f114ee676eef921f6aa4da214bb8ec0"}
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.027910 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6896cf67f5-ztfst" event={"ID":"b670e77a-b666-4fe3-bc2f-8ceb13c819a0","Type":"ContainerStarted","Data":"eca500d9532c68f4405781394577ef6fbfd5ae83758e87b3864a35868b7052fb"}
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.036895 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-g89td" podStartSLOduration=3.405780221 podStartE2EDuration="39.036875248s" podCreationTimestamp="2026-01-29 06:54:37 +0000 UTC" firstStartedPulling="2026-01-29 06:54:39.197703721 +0000 UTC m=+1170.869198278" lastFinishedPulling="2026-01-29 06:55:14.828798748 +0000 UTC m=+1206.500293305" observedRunningTime="2026-01-29 06:55:16.036853417 +0000 UTC m=+1207.708347984" watchObservedRunningTime="2026-01-29 06:55:16.036875248 +0000 UTC m=+1207.708369805"
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.042721 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b7fd56548-f8c7z" event={"ID":"27188e95-6192-4569-b254-c1e2d9b28086","Type":"ContainerStarted","Data":"9d22b040aef5f1212a99cca021391b8a04401e06ef3cba31d31cf1356747f059"}
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.042767 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5b7fd56548-f8c7z"
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.042777 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b7fd56548-f8c7z" event={"ID":"27188e95-6192-4569-b254-c1e2d9b28086","Type":"ContainerStarted","Data":"5284c41c7efbbae3ed2c2474677c06318dba9f788a1f7ce53235f393ed83b41e"}
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.045055 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxqlg\" (UniqueName: \"kubernetes.io/projected/ec1f6ee9-55ac-465f-92c3-0b08506db348-kube-api-access-jxqlg\") on node \"crc\" DevicePath \"\""
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.051624 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" event={"ID":"bd9ed061-0329-42e0-8cca-e7b560c7a19c","Type":"ContainerStarted","Data":"fceb21f2b2390152827167d4a2d6ba481059545e6ee770eebaf47b6ac1c3d8cd"}
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.058416 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5977db5ddd-9kzf9" event={"ID":"c10aa65c-076c-4150-a573-d945be1b9c58","Type":"ContainerStarted","Data":"f3296b96d29dfc65ccdfd55871dd085f3ad9dca1b7dbc73ba3fba9fba5f8f5b9"}
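
The "Observed pod startup duration" record above carries its own arithmetic: podStartE2EDuration is approximately observedRunningTime minus podCreationTimestamp, and podStartSLOduration appears to be that end-to-end figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling); for placement-db-sync-g89td, 39.036875248s - 35.631095027s = 3.405780221s, matching the logged SLO value. The Go sketch below reproduces that arithmetic from the logged timestamps; the field interpretation is inferred from the record, not from kubelet source.

package main

import (
	"fmt"
	"time"
)

// Recomputes the placement-db-sync-g89td startup-latency record above.
// Assumed interpretation: E2E = observedRunningTime - podCreationTimestamp,
// SLO = E2E - (lastFinishedPulling - firstStartedPulling).
func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-01-29 06:54:37 +0000 UTC")
	firstPull := parse("2026-01-29 06:54:39.197703721 +0000 UTC")
	lastPull := parse("2026-01-29 06:55:14.828798748 +0000 UTC")
	running := parse("2026-01-29 06:55:16.036853417 +0000 UTC")

	e2e := running.Sub(created)     // ~39.037s; the logged 39.036875248s is captured a few µs apart
	pull := lastPull.Sub(firstPull) // 35.631095027s spent pulling the image
	fmt.Println(e2e, e2e-pull)      // SLO ~3.4058s, matching podStartSLOduration=3.405780221
}
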
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.059418 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.059442 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.059451 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.059460 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.094055 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5b7fd56548-f8c7z" podStartSLOduration=3.094037888 podStartE2EDuration="3.094037888s" podCreationTimestamp="2026-01-29 06:55:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:16.085045158 +0000 UTC m=+1207.756539715" watchObservedRunningTime="2026-01-29 06:55:16.094037888 +0000 UTC m=+1207.765532445"
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.380624 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ec1f6ee9-55ac-465f-92c3-0b08506db348" (UID: "ec1f6ee9-55ac-465f-92c3-0b08506db348"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.387448 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ec1f6ee9-55ac-465f-92c3-0b08506db348" (UID: "ec1f6ee9-55ac-465f-92c3-0b08506db348"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.405229 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ec1f6ee9-55ac-465f-92c3-0b08506db348" (UID: "ec1f6ee9-55ac-465f-92c3-0b08506db348"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.408427 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-config" (OuterVolumeSpecName: "config") pod "ec1f6ee9-55ac-465f-92c3-0b08506db348" (UID: "ec1f6ee9-55ac-465f-92c3-0b08506db348"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.426605 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ec1f6ee9-55ac-465f-92c3-0b08506db348" (UID: "ec1f6ee9-55ac-465f-92c3-0b08506db348"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.452945 4861 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.452984 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.452995 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.453013 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.453030 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1f6ee9-55ac-465f-92c3-0b08506db348-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.674778 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-685444497c-2hjbp"] Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.682597 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-685444497c-2hjbp"] Jan 29 06:55:16 crc kubenswrapper[4861]: I0129 06:55:16.981896 4861 scope.go:117] "RemoveContainer" containerID="e698b70f756cbfc47a2c0beeee49408a99d313af7c3f7bef26b5f47a23aaa2f4" Jan 29 06:55:17 crc kubenswrapper[4861]: I0129 06:55:17.076517 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5977db5ddd-9kzf9" event={"ID":"c10aa65c-076c-4150-a573-d945be1b9c58","Type":"ContainerStarted","Data":"bd89dffc20a0d0c96b40c40a92471914f9ee11b78e9c9c44b9b150003eb51c61"} Jan 29 06:55:17 crc kubenswrapper[4861]: I0129 06:55:17.077894 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6896cf67f5-ztfst" event={"ID":"b670e77a-b666-4fe3-bc2f-8ceb13c819a0","Type":"ContainerStarted","Data":"6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50"} Jan 29 06:55:17 crc kubenswrapper[4861]: I0129 06:55:17.079567 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d9d5fc54d-kq4c4" event={"ID":"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3","Type":"ContainerStarted","Data":"f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5"} Jan 29 06:55:17 crc kubenswrapper[4861]: I0129 06:55:17.125653 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec1f6ee9-55ac-465f-92c3-0b08506db348" path="/var/lib/kubelet/pods/ec1f6ee9-55ac-465f-92c3-0b08506db348/volumes" Jan 29 06:55:17 crc kubenswrapper[4861]: E0129 06:55:17.866412 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice/crio-ddf002d2b21cac7beb20b97fe76c1d0bda80dbc4238c13fb2244922405a64059\": RecentStats: unable to find data in memory cache]"
Jan 29 06:55:18 crc kubenswrapper[4861]: I0129 06:55:18.098875 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 29 06:55:18 crc kubenswrapper[4861]: I0129 06:55:18.099125 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 29 06:55:18 crc kubenswrapper[4861]: I0129 06:55:18.190959 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 29 06:55:18 crc kubenswrapper[4861]: I0129 06:55:18.191532 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 29 06:55:18 crc kubenswrapper[4861]: I0129 06:55:18.198048 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:18 crc kubenswrapper[4861]: I0129 06:55:18.198165 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 29 06:55:18 crc kubenswrapper[4861]: I0129 06:55:18.216947 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 29 06:55:19 crc kubenswrapper[4861]: I0129 06:55:19.528184 4861 scope.go:117] "RemoveContainer" containerID="01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b"
Jan 29 06:55:19 crc kubenswrapper[4861]: E0129 06:55:19.529401 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b\": container with ID starting with 01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b not found: ID does not exist" containerID="01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b"
Jan 29 06:55:19 crc kubenswrapper[4861]: I0129 06:55:19.529442 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b"} err="failed to get container status \"01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b\": rpc error: code = NotFound desc = could not find container \"01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b\": container with ID starting with 01ef3a938092f2a8acfd546902c5594af137c38fe8a8c5f6422094dcc4a8cf2b not found: ID does not exist"
Jan 29 06:55:19 crc kubenswrapper[4861]: I0129 06:55:19.529472 4861 scope.go:117] "RemoveContainer" containerID="e698b70f756cbfc47a2c0beeee49408a99d313af7c3f7bef26b5f47a23aaa2f4"
Jan 29 06:55:19 crc kubenswrapper[4861]: E0129 06:55:19.530732 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e698b70f756cbfc47a2c0beeee49408a99d313af7c3f7bef26b5f47a23aaa2f4\": container with ID starting with e698b70f756cbfc47a2c0beeee49408a99d313af7c3f7bef26b5f47a23aaa2f4 not found: ID does not exist" containerID="e698b70f756cbfc47a2c0beeee49408a99d313af7c3f7bef26b5f47a23aaa2f4"
Jan 29 06:55:19 crc kubenswrapper[4861]: I0129 06:55:19.530766 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e698b70f756cbfc47a2c0beeee49408a99d313af7c3f7bef26b5f47a23aaa2f4"} err="failed to get container status \"e698b70f756cbfc47a2c0beeee49408a99d313af7c3f7bef26b5f47a23aaa2f4\": rpc error: code = NotFound desc = could not find container \"e698b70f756cbfc47a2c0beeee49408a99d313af7c3f7bef26b5f47a23aaa2f4\": container with ID starting with e698b70f756cbfc47a2c0beeee49408a99d313af7c3f7bef26b5f47a23aaa2f4 not found: ID does not exist"
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.152919 4861 generic.go:334] "Generic (PLEG): container finished" podID="d200c5c2-f7a9-4db9-b65c-18658065131d" containerID="048dbf5066137bb4e571a0ddf6f23cd5da6fb05d04084cbec4c293105909f316" exitCode=0
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.153316 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-g89td" event={"ID":"d200c5c2-f7a9-4db9-b65c-18658065131d","Type":"ContainerDied","Data":"048dbf5066137bb4e571a0ddf6f23cd5da6fb05d04084cbec4c293105909f316"}
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.197194 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d8b667488-v7lmh" event={"ID":"947c222c-8f0c-423f-84e8-75a4b9322829","Type":"ContainerStarted","Data":"9a86c0075a1b39a77b81d03d33eaf3de19430f272ef69df32ef5227eb2cfdfbd"}
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.230747 4861 generic.go:334] "Generic (PLEG): container finished" podID="c10aa65c-076c-4150-a573-d945be1b9c58" containerID="810f691c0b8c15fe2499d709b5e4180605aa714baebf718a43d4bdfaeb9bd290" exitCode=1
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.230823 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5977db5ddd-9kzf9" event={"ID":"c10aa65c-076c-4150-a573-d945be1b9c58","Type":"ContainerDied","Data":"810f691c0b8c15fe2499d709b5e4180605aa714baebf718a43d4bdfaeb9bd290"}
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.230951 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5977db5ddd-9kzf9"
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.231569 4861 scope.go:117] "RemoveContainer" containerID="810f691c0b8c15fe2499d709b5e4180605aa714baebf718a43d4bdfaeb9bd290"
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.235525 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" event={"ID":"946bf576-dbb6-4284-9794-8330f6213430","Type":"ContainerStarted","Data":"311e24d561d23d381f387cca73e093ca8ea2c8e85a82b5ae4b7550323de0fad0"}
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.236391 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x"
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.252270 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6896cf67f5-ztfst" event={"ID":"b670e77a-b666-4fe3-bc2f-8ceb13c819a0","Type":"ContainerStarted","Data":"4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee"}
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.253895 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6896cf67f5-ztfst"
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.275803 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" podStartSLOduration=10.275778825 podStartE2EDuration="10.275778825s" podCreationTimestamp="2026-01-29 06:55:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:20.266548809 +0000 UTC m=+1211.938043366" watchObservedRunningTime="2026-01-29 06:55:20.275778825 +0000 UTC m=+1211.947273382"
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.280642 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-54cf866f89-b4nzm" event={"ID":"a71f5c10-5dfd-4620-a5a9-e44593f90221","Type":"ContainerStarted","Data":"1bd643bfd5a239a4c5d1cd11d56a7f00db10e323305ce5d1c290ad5a9bd08930"}
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.294130 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6896cf67f5-ztfst" podStartSLOduration=9.29359245 podStartE2EDuration="9.29359245s" podCreationTimestamp="2026-01-29 06:55:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:20.2849831 +0000 UTC m=+1211.956477677" watchObservedRunningTime="2026-01-29 06:55:20.29359245 +0000 UTC m=+1211.965087017"
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.308021 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-d9d5fc54d-kq4c4" podUID="c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" containerName="barbican-api-log" containerID="cri-o://f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5" gracePeriod=30
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.308244 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d9d5fc54d-kq4c4" event={"ID":"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3","Type":"ContainerStarted","Data":"dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318"}
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.308360 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-d9d5fc54d-kq4c4"
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.308457 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-d9d5fc54d-kq4c4"
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.308827 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-d9d5fc54d-kq4c4" podUID="c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" containerName="barbican-api" containerID="cri-o://dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318" gracePeriod=30
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.356397 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-d9d5fc54d-kq4c4" podStartSLOduration=10.356376604 podStartE2EDuration="10.356376604s" podCreationTimestamp="2026-01-29 06:55:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:20.341677978 +0000 UTC m=+1212.013172535" watchObservedRunningTime="2026-01-29 06:55:20.356376604 +0000 UTC m=+1212.027871161"
Jan 29 06:55:20 crc kubenswrapper[4861]: I0129 06:55:20.618265 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/barbican-api-5977db5ddd-9kzf9"
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.317047 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" event={"ID":"e3e93bc3-461f-4bc0-ad61-f3c0d745f328","Type":"ContainerStarted","Data":"e132425a19e8dda6025381aa3ae9bbcd203c8b07a2c459a6f224a85f73cc6764"}
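
The two "Killing container with a grace period" records above (gracePeriod=30), like the earlier dnsmasq kill (gracePeriod=10), follow the usual TERM-then-KILL shape, and the exitCode=143 (128+15, death by SIGTERM) recorded shortly after is consistent with the container stopping inside its grace window. A generic Go sketch of that pattern follows; it is an illustration of the shape, not kubelet's implementation.

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace sends SIGTERM, waits up to the grace period, then SIGKILLs.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	cmd.Process.Signal(syscall.SIGTERM) // polite stop; a process dying on it reports 128+15=143
	select {
	case <-done:
		fmt.Println("exited within the grace period")
	case <-time.After(grace):
		cmd.Process.Kill() // grace expired: SIGKILL
		<-done
		fmt.Println("killed after the grace period")
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGrace(cmd, 2*time.Second)
}
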
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.317377 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" event={"ID":"e3e93bc3-461f-4bc0-ad61-f3c0d745f328","Type":"ContainerStarted","Data":"8ef9f15bbfd7eb47c5dfa5ff35831589423c4ce32b5278ea0afce2029e107196"}
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.318827 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" event={"ID":"bd9ed061-0329-42e0-8cca-e7b560c7a19c","Type":"ContainerStarted","Data":"c312a62932c0664c9e8bde4d291603923f2e2f621322670c3abe137b105616db"}
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.318855 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" event={"ID":"bd9ed061-0329-42e0-8cca-e7b560c7a19c","Type":"ContainerStarted","Data":"e098f103ebd71969e2f8e2fd838a304a15e40397ed8a4eba94af458e0afc7a28"}
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.320884 4861 generic.go:334] "Generic (PLEG): container finished" podID="c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" containerID="f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5" exitCode=143
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.320945 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d9d5fc54d-kq4c4" event={"ID":"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3","Type":"ContainerDied","Data":"f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5"}
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.322211 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d8b667488-v7lmh" event={"ID":"947c222c-8f0c-423f-84e8-75a4b9322829","Type":"ContainerStarted","Data":"a95782607794ffb075c46075902d651f0f6db3732fc62ef57c3c4e66ef00c4f4"}
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.322629 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-d8b667488-v7lmh"
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.322655 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-d8b667488-v7lmh"
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.324508 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6b8d96575c-7zzfv" event={"ID":"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3","Type":"ContainerStarted","Data":"12d75e57461b87b2ec9d6d00c1b304c2545872ff7dcee032f19e27ff512c2516"}
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.324532 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6b8d96575c-7zzfv" event={"ID":"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3","Type":"ContainerStarted","Data":"75481cbc7bf2e4776643277e100d6f7fcc456f612bd0f4c451db4c8198750b42"}
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.327545 4861 generic.go:334] "Generic (PLEG): container finished" podID="c10aa65c-076c-4150-a573-d945be1b9c58" containerID="4535d6cd1d0a07d42e521f2a195d9e02c782ba1eb93a277b271dfaa8a91266a1" exitCode=1
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.327622 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5977db5ddd-9kzf9" event={"ID":"c10aa65c-076c-4150-a573-d945be1b9c58","Type":"ContainerDied","Data":"4535d6cd1d0a07d42e521f2a195d9e02c782ba1eb93a277b271dfaa8a91266a1"}
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.327649 4861 scope.go:117] "RemoveContainer" containerID="810f691c0b8c15fe2499d709b5e4180605aa714baebf718a43d4bdfaeb9bd290"
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.328041 4861 scope.go:117] "RemoveContainer" containerID="4535d6cd1d0a07d42e521f2a195d9e02c782ba1eb93a277b271dfaa8a91266a1"
Jan 29 06:55:21 crc kubenswrapper[4861]: E0129 06:55:21.328431 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=barbican-api pod=barbican-api-5977db5ddd-9kzf9_openstack(c10aa65c-076c-4150-a573-d945be1b9c58)\"" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58"
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.330666 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-c5jkq" event={"ID":"8c6201c8-50a8-4b95-82e0-b944b78348d6","Type":"ContainerStarted","Data":"b54a873352b23c17ed2e2e12b008503e125d8e142b83ddde0068a40136cd6a56"}
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.333152 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-54cf866f89-b4nzm" event={"ID":"a71f5c10-5dfd-4620-a5a9-e44593f90221","Type":"ContainerStarted","Data":"da2d08c17ba459b3791f3cea9d0813876562acc8611e2417dbf2b4c4c8859a0e"}
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.335199 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": dial tcp 10.217.0.162:9311: connect: connection refused"
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.346484 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" podStartSLOduration=6.408673504 podStartE2EDuration="11.346471885s" podCreationTimestamp="2026-01-29 06:55:10 +0000 UTC" firstStartedPulling="2026-01-29 06:55:14.724902604 +0000 UTC m=+1206.396397161" lastFinishedPulling="2026-01-29 06:55:19.662700985 +0000 UTC m=+1211.334195542" observedRunningTime="2026-01-29 06:55:21.342874113 +0000 UTC m=+1213.014368670" watchObservedRunningTime="2026-01-29 06:55:21.346471885 +0000 UTC m=+1213.017966442"
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.397807 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6b8d96575c-7zzfv" podStartSLOduration=3.276867271 podStartE2EDuration="7.397785736s" podCreationTimestamp="2026-01-29 06:55:14 +0000 UTC" firstStartedPulling="2026-01-29 06:55:15.565016204 +0000 UTC m=+1207.236510761" lastFinishedPulling="2026-01-29 06:55:19.685934669 +0000 UTC m=+1211.357429226" observedRunningTime="2026-01-29 06:55:21.378931284 +0000 UTC m=+1213.050425851" watchObservedRunningTime="2026-01-29 06:55:21.397785736 +0000 UTC m=+1213.069280303"
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.438251 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-c5jkq" podStartSLOduration=3.355605619 podStartE2EDuration="44.438231649s" podCreationTimestamp="2026-01-29 06:54:37 +0000 UTC" firstStartedPulling="2026-01-29 06:54:38.8128483 +0000 UTC m=+1170.484342857" lastFinishedPulling="2026-01-29 06:55:19.89547432 +0000 UTC m=+1211.566968887" observedRunningTime="2026-01-29 06:55:21.397299793 +0000 UTC m=+1213.068794370" watchObservedRunningTime="2026-01-29 06:55:21.438231649 +0000 UTC m=+1213.109726206"
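
The CrashLoopBackOff error above ("back-off 10s restarting failed container") is the first rung of the kubelet's exponential restart backoff, which by default starts at 10s, doubles on each failed restart, and is capped at 5m; those constants are assumed here from upstream kubelet defaults rather than stated anywhere in this log. A small Go sketch of the progression:

package main

import (
	"fmt"
	"time"
)

// Assumed kubelet defaults: 10s initial delay, doubled per failed restart,
// capped at 5m. The "back-off 10s" record above is step one of this series.
func main() {
	delay, maxDelay := 10*time.Second, 5*time.Minute
	for attempt := 1; attempt <= 7; attempt++ {
		fmt.Printf("restart attempt %d: back-off %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
	// prints 10s, 20s, 40s, 1m20s, 2m40s, 5m, 5m
}
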
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.449975 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-54cf866f89-b4nzm"]
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.458937 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" podStartSLOduration=3.107885474 podStartE2EDuration="7.458922427s" podCreationTimestamp="2026-01-29 06:55:14 +0000 UTC" firstStartedPulling="2026-01-29 06:55:15.382615475 +0000 UTC m=+1207.054110032" lastFinishedPulling="2026-01-29 06:55:19.733652418 +0000 UTC m=+1211.405146985" observedRunningTime="2026-01-29 06:55:21.449780104 +0000 UTC m=+1213.121274661" watchObservedRunningTime="2026-01-29 06:55:21.458922427 +0000 UTC m=+1213.130416984"
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.489382 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-75945d9bdb-6szss"]
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.508850 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-d8b667488-v7lmh" podStartSLOduration=7.508830652 podStartE2EDuration="7.508830652s" podCreationTimestamp="2026-01-29 06:55:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:21.486570344 +0000 UTC m=+1213.158064911" watchObservedRunningTime="2026-01-29 06:55:21.508830652 +0000 UTC m=+1213.180325209"
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.534601 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-54cf866f89-b4nzm" podStartSLOduration=6.676323891 podStartE2EDuration="11.53457931s" podCreationTimestamp="2026-01-29 06:55:10 +0000 UTC" firstStartedPulling="2026-01-29 06:55:14.829042655 +0000 UTC m=+1206.500537212" lastFinishedPulling="2026-01-29 06:55:19.687298074 +0000 UTC m=+1211.358792631" observedRunningTime="2026-01-29 06:55:21.516435507 +0000 UTC m=+1213.187930064" watchObservedRunningTime="2026-01-29 06:55:21.53457931 +0000 UTC m=+1213.206073867"
Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.756862 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-g89td" Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.860890 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-combined-ca-bundle\") pod \"d200c5c2-f7a9-4db9-b65c-18658065131d\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.861064 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-config-data\") pod \"d200c5c2-f7a9-4db9-b65c-18658065131d\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.861117 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d200c5c2-f7a9-4db9-b65c-18658065131d-logs\") pod \"d200c5c2-f7a9-4db9-b65c-18658065131d\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.861177 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-scripts\") pod \"d200c5c2-f7a9-4db9-b65c-18658065131d\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.861211 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xflmb\" (UniqueName: \"kubernetes.io/projected/d200c5c2-f7a9-4db9-b65c-18658065131d-kube-api-access-xflmb\") pod \"d200c5c2-f7a9-4db9-b65c-18658065131d\" (UID: \"d200c5c2-f7a9-4db9-b65c-18658065131d\") " Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.862667 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d200c5c2-f7a9-4db9-b65c-18658065131d-logs" (OuterVolumeSpecName: "logs") pod "d200c5c2-f7a9-4db9-b65c-18658065131d" (UID: "d200c5c2-f7a9-4db9-b65c-18658065131d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.867325 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d200c5c2-f7a9-4db9-b65c-18658065131d-kube-api-access-xflmb" (OuterVolumeSpecName: "kube-api-access-xflmb") pod "d200c5c2-f7a9-4db9-b65c-18658065131d" (UID: "d200c5c2-f7a9-4db9-b65c-18658065131d"). InnerVolumeSpecName "kube-api-access-xflmb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.880783 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-scripts" (OuterVolumeSpecName: "scripts") pod "d200c5c2-f7a9-4db9-b65c-18658065131d" (UID: "d200c5c2-f7a9-4db9-b65c-18658065131d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.908910 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-config-data" (OuterVolumeSpecName: "config-data") pod "d200c5c2-f7a9-4db9-b65c-18658065131d" (UID: "d200c5c2-f7a9-4db9-b65c-18658065131d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.962932 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.962962 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d200c5c2-f7a9-4db9-b65c-18658065131d-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.962971 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.962981 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xflmb\" (UniqueName: \"kubernetes.io/projected/d200c5c2-f7a9-4db9-b65c-18658065131d-kube-api-access-xflmb\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:21 crc kubenswrapper[4861]: I0129 06:55:21.963094 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d200c5c2-f7a9-4db9-b65c-18658065131d" (UID: "d200c5c2-f7a9-4db9-b65c-18658065131d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.064615 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d200c5c2-f7a9-4db9-b65c-18658065131d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.287637 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.306489 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-6957679874-pnq22"] Jan 29 06:55:22 crc kubenswrapper[4861]: E0129 06:55:22.306854 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec1f6ee9-55ac-465f-92c3-0b08506db348" containerName="dnsmasq-dns" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.306872 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec1f6ee9-55ac-465f-92c3-0b08506db348" containerName="dnsmasq-dns" Jan 29 06:55:22 crc kubenswrapper[4861]: E0129 06:55:22.306900 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" containerName="barbican-api-log" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.306907 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" containerName="barbican-api-log" Jan 29 06:55:22 crc kubenswrapper[4861]: E0129 06:55:22.306922 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d200c5c2-f7a9-4db9-b65c-18658065131d" containerName="placement-db-sync" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.306928 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d200c5c2-f7a9-4db9-b65c-18658065131d" containerName="placement-db-sync" Jan 29 06:55:22 crc kubenswrapper[4861]: E0129 06:55:22.306950 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec1f6ee9-55ac-465f-92c3-0b08506db348" containerName="init" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.306956 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec1f6ee9-55ac-465f-92c3-0b08506db348" containerName="init" Jan 29 06:55:22 crc kubenswrapper[4861]: E0129 06:55:22.306966 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" containerName="barbican-api" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.306972 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" containerName="barbican-api" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.307137 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d200c5c2-f7a9-4db9-b65c-18658065131d" containerName="placement-db-sync" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.307156 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" containerName="barbican-api" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.307170 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec1f6ee9-55ac-465f-92c3-0b08506db348" containerName="dnsmasq-dns" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.307187 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" containerName="barbican-api-log" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.310572 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.313417 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.315473 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.339360 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6957679874-pnq22"] Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.349706 4861 generic.go:334] "Generic (PLEG): container finished" podID="c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" containerID="dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318" exitCode=0 Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.349762 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d9d5fc54d-kq4c4" event={"ID":"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3","Type":"ContainerDied","Data":"dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318"} Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.349783 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d9d5fc54d-kq4c4" event={"ID":"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3","Type":"ContainerDied","Data":"8b8373531f81b9135e02abdc9412d8af465a5956774f5229be64485a0583adb7"} Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.349798 4861 scope.go:117] "RemoveContainer" containerID="dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.349900 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-d9d5fc54d-kq4c4" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.370759 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-g89td" event={"ID":"d200c5c2-f7a9-4db9-b65c-18658065131d","Type":"ContainerDied","Data":"e40c7f0ae2a442b0075874a95657683c97638c0d6f8a94af8c2cac0422b7afb2"} Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.371563 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e40c7f0ae2a442b0075874a95657683c97638c0d6f8a94af8c2cac0422b7afb2" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.370951 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-g89td" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.371495 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data\") pod \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.372363 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data-custom\") pod \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.373109 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-combined-ca-bundle\") pod \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.373282 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfxcn\" (UniqueName: \"kubernetes.io/projected/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-kube-api-access-tfxcn\") pod \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.373566 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-logs\") pod \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\" (UID: \"c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3\") " Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.374205 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmz2z\" (UniqueName: \"kubernetes.io/projected/4b488de3-67a5-49cf-a61a-37a44acbbe19-kube-api-access-tmz2z\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.374343 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-config-data\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.374567 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b488de3-67a5-49cf-a61a-37a44acbbe19-logs\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.374679 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-public-tls-certs\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.374766 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-combined-ca-bundle\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.374850 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-scripts\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.374972 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-internal-tls-certs\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.375467 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-logs" (OuterVolumeSpecName: "logs") pod "c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" (UID: "c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.379058 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" (UID: "c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.379466 4861 scope.go:117] "RemoveContainer" containerID="4535d6cd1d0a07d42e521f2a195d9e02c782ba1eb93a277b271dfaa8a91266a1" Jan 29 06:55:22 crc kubenswrapper[4861]: E0129 06:55:22.379691 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=barbican-api pod=barbican-api-5977db5ddd-9kzf9_openstack(c10aa65c-076c-4150-a573-d945be1b9c58)\"" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.381248 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": dial tcp 10.217.0.162:9311: connect: connection refused" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.384402 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-kube-api-access-tfxcn" (OuterVolumeSpecName: "kube-api-access-tfxcn") pod "c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" (UID: "c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3"). InnerVolumeSpecName "kube-api-access-tfxcn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.414694 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" (UID: "c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.431871 4861 scope.go:117] "RemoveContainer" containerID="f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.453057 4861 scope.go:117] "RemoveContainer" containerID="dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318" Jan 29 06:55:22 crc kubenswrapper[4861]: E0129 06:55:22.453416 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318\": container with ID starting with dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318 not found: ID does not exist" containerID="dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.453446 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318"} err="failed to get container status \"dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318\": rpc error: code = NotFound desc = could not find container \"dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318\": container with ID starting with dc88bb0e3d31c0cd2af41df0156854f00cf41750b811cc00977ebe42d1787318 not found: ID does not exist" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.453465 4861 scope.go:117] "RemoveContainer" containerID="f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5" Jan 29 06:55:22 crc kubenswrapper[4861]: E0129 06:55:22.453812 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5\": container with ID starting with f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5 not found: ID does not exist" containerID="f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.453844 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5"} err="failed to get container status \"f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5\": rpc error: code = NotFound desc = could not find container \"f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5\": container with ID starting with f9a72c4ea5ff8aee2d40f0a140330ccf46595b5f71c3cdddebb9e24dbaa10be5 not found: ID does not exist" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.457375 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data" (OuterVolumeSpecName: "config-data") pod "c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" (UID: "c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.477141 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-public-tls-certs\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.477216 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-combined-ca-bundle\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.477265 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-scripts\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.477350 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-internal-tls-certs\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.477458 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmz2z\" (UniqueName: \"kubernetes.io/projected/4b488de3-67a5-49cf-a61a-37a44acbbe19-kube-api-access-tmz2z\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.477479 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-config-data\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.477901 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b488de3-67a5-49cf-a61a-37a44acbbe19-logs\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.478142 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfxcn\" (UniqueName: \"kubernetes.io/projected/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-kube-api-access-tfxcn\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.478163 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.478179 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:22 crc 
kubenswrapper[4861]: I0129 06:55:22.478191 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.478204 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.481582 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-combined-ca-bundle\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.481778 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-public-tls-certs\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.484330 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b488de3-67a5-49cf-a61a-37a44acbbe19-logs\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.484866 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-config-data\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.488565 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-internal-tls-certs\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.489318 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-scripts\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.502575 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmz2z\" (UniqueName: \"kubernetes.io/projected/4b488de3-67a5-49cf-a61a-37a44acbbe19-kube-api-access-tmz2z\") pod \"placement-6957679874-pnq22\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.626434 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.772085 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-d9d5fc54d-kq4c4"] Jan 29 06:55:22 crc kubenswrapper[4861]: I0129 06:55:22.787597 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-d9d5fc54d-kq4c4"] Jan 29 06:55:23 crc kubenswrapper[4861]: I0129 06:55:23.141293 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3" path="/var/lib/kubelet/pods/c2ee2aaa-7913-4413-95f4-0ed12ce9e3a3/volumes" Jan 29 06:55:23 crc kubenswrapper[4861]: I0129 06:55:23.141854 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6957679874-pnq22"] Jan 29 06:55:23 crc kubenswrapper[4861]: I0129 06:55:23.388495 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6957679874-pnq22" event={"ID":"4b488de3-67a5-49cf-a61a-37a44acbbe19","Type":"ContainerStarted","Data":"2ff6119bc7bdded212166337823d1530f1ce03fc46b0826b2b3372aa77b0c84f"} Jan 29 06:55:23 crc kubenswrapper[4861]: I0129 06:55:23.390752 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" podUID="e3e93bc3-461f-4bc0-ad61-f3c0d745f328" containerName="barbican-keystone-listener-log" containerID="cri-o://8ef9f15bbfd7eb47c5dfa5ff35831589423c4ce32b5278ea0afce2029e107196" gracePeriod=30 Jan 29 06:55:23 crc kubenswrapper[4861]: I0129 06:55:23.391257 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" podUID="e3e93bc3-461f-4bc0-ad61-f3c0d745f328" containerName="barbican-keystone-listener" containerID="cri-o://e132425a19e8dda6025381aa3ae9bbcd203c8b07a2c459a6f224a85f73cc6764" gracePeriod=30 Jan 29 06:55:23 crc kubenswrapper[4861]: I0129 06:55:23.391445 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-54cf866f89-b4nzm" podUID="a71f5c10-5dfd-4620-a5a9-e44593f90221" containerName="barbican-worker" containerID="cri-o://da2d08c17ba459b3791f3cea9d0813876562acc8611e2417dbf2b4c4c8859a0e" gracePeriod=30 Jan 29 06:55:23 crc kubenswrapper[4861]: I0129 06:55:23.391450 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-54cf866f89-b4nzm" podUID="a71f5c10-5dfd-4620-a5a9-e44593f90221" containerName="barbican-worker-log" containerID="cri-o://1bd643bfd5a239a4c5d1cd11d56a7f00db10e323305ce5d1c290ad5a9bd08930" gracePeriod=30 Jan 29 06:55:23 crc kubenswrapper[4861]: I0129 06:55:23.617666 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:23 crc kubenswrapper[4861]: I0129 06:55:23.618442 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": dial tcp 10.217.0.162:9311: connect: connection refused" Jan 29 06:55:23 crc kubenswrapper[4861]: I0129 06:55:23.618472 4861 scope.go:117] "RemoveContainer" containerID="4535d6cd1d0a07d42e521f2a195d9e02c782ba1eb93a277b271dfaa8a91266a1" Jan 29 06:55:23 crc kubenswrapper[4861]: I0129 06:55:23.618507 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5977db5ddd-9kzf9" 
podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": dial tcp 10.217.0.162:9311: connect: connection refused" Jan 29 06:55:23 crc kubenswrapper[4861]: E0129 06:55:23.618748 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=barbican-api pod=barbican-api-5977db5ddd-9kzf9_openstack(c10aa65c-076c-4150-a573-d945be1b9c58)\"" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.405712 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6957679874-pnq22" event={"ID":"4b488de3-67a5-49cf-a61a-37a44acbbe19","Type":"ContainerStarted","Data":"64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8"} Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.406363 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6957679874-pnq22" event={"ID":"4b488de3-67a5-49cf-a61a-37a44acbbe19","Type":"ContainerStarted","Data":"403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b"} Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.406645 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.406696 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.412587 4861 generic.go:334] "Generic (PLEG): container finished" podID="a71f5c10-5dfd-4620-a5a9-e44593f90221" containerID="da2d08c17ba459b3791f3cea9d0813876562acc8611e2417dbf2b4c4c8859a0e" exitCode=0 Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.413464 4861 generic.go:334] "Generic (PLEG): container finished" podID="a71f5c10-5dfd-4620-a5a9-e44593f90221" containerID="1bd643bfd5a239a4c5d1cd11d56a7f00db10e323305ce5d1c290ad5a9bd08930" exitCode=143 Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.413506 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-54cf866f89-b4nzm" event={"ID":"a71f5c10-5dfd-4620-a5a9-e44593f90221","Type":"ContainerDied","Data":"da2d08c17ba459b3791f3cea9d0813876562acc8611e2417dbf2b4c4c8859a0e"} Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.413564 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-54cf866f89-b4nzm" event={"ID":"a71f5c10-5dfd-4620-a5a9-e44593f90221","Type":"ContainerDied","Data":"1bd643bfd5a239a4c5d1cd11d56a7f00db10e323305ce5d1c290ad5a9bd08930"} Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.418809 4861 generic.go:334] "Generic (PLEG): container finished" podID="e3e93bc3-461f-4bc0-ad61-f3c0d745f328" containerID="e132425a19e8dda6025381aa3ae9bbcd203c8b07a2c459a6f224a85f73cc6764" exitCode=0 Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.418854 4861 generic.go:334] "Generic (PLEG): container finished" podID="e3e93bc3-461f-4bc0-ad61-f3c0d745f328" containerID="8ef9f15bbfd7eb47c5dfa5ff35831589423c4ce32b5278ea0afce2029e107196" exitCode=143 Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.418885 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" 
event={"ID":"e3e93bc3-461f-4bc0-ad61-f3c0d745f328","Type":"ContainerDied","Data":"e132425a19e8dda6025381aa3ae9bbcd203c8b07a2c459a6f224a85f73cc6764"} Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.418921 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" event={"ID":"e3e93bc3-461f-4bc0-ad61-f3c0d745f328","Type":"ContainerDied","Data":"8ef9f15bbfd7eb47c5dfa5ff35831589423c4ce32b5278ea0afce2029e107196"} Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.449418 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-6957679874-pnq22" podStartSLOduration=2.449397086 podStartE2EDuration="2.449397086s" podCreationTimestamp="2026-01-29 06:55:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:24.43977287 +0000 UTC m=+1216.111267507" watchObservedRunningTime="2026-01-29 06:55:24.449397086 +0000 UTC m=+1216.120891653" Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.616067 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.616597 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": dial tcp 10.217.0.162:9311: connect: connection refused" Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.616814 4861 scope.go:117] "RemoveContainer" containerID="4535d6cd1d0a07d42e521f2a195d9e02c782ba1eb93a277b271dfaa8a91266a1" Jan 29 06:55:24 crc kubenswrapper[4861]: E0129 06:55:24.617027 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=barbican-api pod=barbican-api-5977db5ddd-9kzf9_openstack(c10aa65c-076c-4150-a573-d945be1b9c58)\"" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" Jan 29 06:55:24 crc kubenswrapper[4861]: I0129 06:55:24.625295 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": dial tcp 10.217.0.162:9311: connect: connection refused" Jan 29 06:55:25 crc kubenswrapper[4861]: I0129 06:55:25.647795 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:25 crc kubenswrapper[4861]: I0129 06:55:25.726459 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-l64rb"] Jan 29 06:55:25 crc kubenswrapper[4861]: I0129 06:55:25.726998 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" podUID="c19001ec-52ca-4c24-8762-01590c5a843f" containerName="dnsmasq-dns" containerID="cri-o://0cdb16ce2c168c436b331e4bbdf40084165870485570a89136f5fcf62a1ecb90" gracePeriod=10 Jan 29 06:55:26 crc kubenswrapper[4861]: I0129 06:55:26.616748 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" 
probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": dial tcp 10.217.0.162:9311: connect: connection refused" Jan 29 06:55:27 crc kubenswrapper[4861]: I0129 06:55:27.456297 4861 generic.go:334] "Generic (PLEG): container finished" podID="c19001ec-52ca-4c24-8762-01590c5a843f" containerID="0cdb16ce2c168c436b331e4bbdf40084165870485570a89136f5fcf62a1ecb90" exitCode=0 Jan 29 06:55:27 crc kubenswrapper[4861]: I0129 06:55:27.456344 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" event={"ID":"c19001ec-52ca-4c24-8762-01590c5a843f","Type":"ContainerDied","Data":"0cdb16ce2c168c436b331e4bbdf40084165870485570a89136f5fcf62a1ecb90"} Jan 29 06:55:27 crc kubenswrapper[4861]: I0129 06:55:27.529549 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:27 crc kubenswrapper[4861]: I0129 06:55:27.570967 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-d8b667488-v7lmh" podUID="947c222c-8f0c-423f-84e8-75a4b9322829" containerName="barbican-api-log" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 06:55:27 crc kubenswrapper[4861]: I0129 06:55:27.581677 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-d8b667488-v7lmh" podUID="947c222c-8f0c-423f-84e8-75a4b9322829" containerName="barbican-api-log" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 06:55:27 crc kubenswrapper[4861]: I0129 06:55:27.889675 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" podUID="c19001ec-52ca-4c24-8762-01590c5a843f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Jan 29 06:55:28 crc kubenswrapper[4861]: E0129 06:55:28.136703 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice/crio-ddf002d2b21cac7beb20b97fe76c1d0bda80dbc4238c13fb2244922405a64059\": RecentStats: unable to find data in memory cache]" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.706917 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.710577 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.799435 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data\") pod \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.799487 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data\") pod \"a71f5c10-5dfd-4620-a5a9-e44593f90221\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.799550 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htr4x\" (UniqueName: \"kubernetes.io/projected/a71f5c10-5dfd-4620-a5a9-e44593f90221-kube-api-access-htr4x\") pod \"a71f5c10-5dfd-4620-a5a9-e44593f90221\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.799583 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data-custom\") pod \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.799615 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcvx6\" (UniqueName: \"kubernetes.io/projected/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-kube-api-access-lcvx6\") pod \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.799637 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-combined-ca-bundle\") pod \"a71f5c10-5dfd-4620-a5a9-e44593f90221\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.799664 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a71f5c10-5dfd-4620-a5a9-e44593f90221-logs\") pod \"a71f5c10-5dfd-4620-a5a9-e44593f90221\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.799720 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data-custom\") pod \"a71f5c10-5dfd-4620-a5a9-e44593f90221\" (UID: \"a71f5c10-5dfd-4620-a5a9-e44593f90221\") " Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.799743 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-logs\") pod \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.799758 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-combined-ca-bundle\") pod 
\"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\" (UID: \"e3e93bc3-461f-4bc0-ad61-f3c0d745f328\") " Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.801756 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-logs" (OuterVolumeSpecName: "logs") pod "e3e93bc3-461f-4bc0-ad61-f3c0d745f328" (UID: "e3e93bc3-461f-4bc0-ad61-f3c0d745f328"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.801867 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a71f5c10-5dfd-4620-a5a9-e44593f90221-logs" (OuterVolumeSpecName: "logs") pod "a71f5c10-5dfd-4620-a5a9-e44593f90221" (UID: "a71f5c10-5dfd-4620-a5a9-e44593f90221"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.809473 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a71f5c10-5dfd-4620-a5a9-e44593f90221-kube-api-access-htr4x" (OuterVolumeSpecName: "kube-api-access-htr4x") pod "a71f5c10-5dfd-4620-a5a9-e44593f90221" (UID: "a71f5c10-5dfd-4620-a5a9-e44593f90221"). InnerVolumeSpecName "kube-api-access-htr4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.809636 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a71f5c10-5dfd-4620-a5a9-e44593f90221" (UID: "a71f5c10-5dfd-4620-a5a9-e44593f90221"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.812254 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-kube-api-access-lcvx6" (OuterVolumeSpecName: "kube-api-access-lcvx6") pod "e3e93bc3-461f-4bc0-ad61-f3c0d745f328" (UID: "e3e93bc3-461f-4bc0-ad61-f3c0d745f328"). InnerVolumeSpecName "kube-api-access-lcvx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.819721 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e3e93bc3-461f-4bc0-ad61-f3c0d745f328" (UID: "e3e93bc3-461f-4bc0-ad61-f3c0d745f328"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.847306 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e3e93bc3-461f-4bc0-ad61-f3c0d745f328" (UID: "e3e93bc3-461f-4bc0-ad61-f3c0d745f328"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.863341 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a71f5c10-5dfd-4620-a5a9-e44593f90221" (UID: "a71f5c10-5dfd-4620-a5a9-e44593f90221"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.901443 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data" (OuterVolumeSpecName: "config-data") pod "a71f5c10-5dfd-4620-a5a9-e44593f90221" (UID: "a71f5c10-5dfd-4620-a5a9-e44593f90221"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.901776 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.901805 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.901814 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.901822 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htr4x\" (UniqueName: \"kubernetes.io/projected/a71f5c10-5dfd-4620-a5a9-e44593f90221-kube-api-access-htr4x\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.901833 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.901840 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcvx6\" (UniqueName: \"kubernetes.io/projected/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-kube-api-access-lcvx6\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.901848 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.901858 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a71f5c10-5dfd-4620-a5a9-e44593f90221-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.901866 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a71f5c10-5dfd-4620-a5a9-e44593f90221-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:28 crc kubenswrapper[4861]: I0129 06:55:28.909897 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data" (OuterVolumeSpecName: "config-data") pod "e3e93bc3-461f-4bc0-ad61-f3c0d745f328" (UID: "e3e93bc3-461f-4bc0-ad61-f3c0d745f328"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.004290 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3e93bc3-461f-4bc0-ad61-f3c0d745f328-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.209915 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.283660 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5977db5ddd-9kzf9"] Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.283897 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" containerID="cri-o://bd89dffc20a0d0c96b40c40a92471914f9ee11b78e9c9c44b9b150003eb51c61" gracePeriod=30 Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.285541 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": dial tcp 10.217.0.162:9311: connect: connection refused" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.382534 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.412983 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-config\") pod \"c19001ec-52ca-4c24-8762-01590c5a843f\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.413041 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cf2ln\" (UniqueName: \"kubernetes.io/projected/c19001ec-52ca-4c24-8762-01590c5a843f-kube-api-access-cf2ln\") pod \"c19001ec-52ca-4c24-8762-01590c5a843f\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.413130 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-nb\") pod \"c19001ec-52ca-4c24-8762-01590c5a843f\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.413231 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-dns-svc\") pod \"c19001ec-52ca-4c24-8762-01590c5a843f\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.421549 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c19001ec-52ca-4c24-8762-01590c5a843f-kube-api-access-cf2ln" (OuterVolumeSpecName: "kube-api-access-cf2ln") pod "c19001ec-52ca-4c24-8762-01590c5a843f" (UID: "c19001ec-52ca-4c24-8762-01590c5a843f"). InnerVolumeSpecName "kube-api-access-cf2ln". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.462461 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-config" (OuterVolumeSpecName: "config") pod "c19001ec-52ca-4c24-8762-01590c5a843f" (UID: "c19001ec-52ca-4c24-8762-01590c5a843f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.462498 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c19001ec-52ca-4c24-8762-01590c5a843f" (UID: "c19001ec-52ca-4c24-8762-01590c5a843f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.477176 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.477147 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75945d9bdb-6szss" event={"ID":"e3e93bc3-461f-4bc0-ad61-f3c0d745f328","Type":"ContainerDied","Data":"787f206c66905f39da63ffec71278974810ed6568d0de1b73df1bd82eb90638b"} Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.477253 4861 scope.go:117] "RemoveContainer" containerID="e132425a19e8dda6025381aa3ae9bbcd203c8b07a2c459a6f224a85f73cc6764" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.478259 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c19001ec-52ca-4c24-8762-01590c5a843f" (UID: "c19001ec-52ca-4c24-8762-01590c5a843f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.481901 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.482001 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-l64rb" event={"ID":"c19001ec-52ca-4c24-8762-01590c5a843f","Type":"ContainerDied","Data":"9cee34911c42ac5398bfcea688c2bda63d7d03acf32a6373303aa0cd7f8c6c23"} Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.484304 4861 generic.go:334] "Generic (PLEG): container finished" podID="c10aa65c-076c-4150-a573-d945be1b9c58" containerID="bd89dffc20a0d0c96b40c40a92471914f9ee11b78e9c9c44b9b150003eb51c61" exitCode=143 Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.484366 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5977db5ddd-9kzf9" event={"ID":"c10aa65c-076c-4150-a573-d945be1b9c58","Type":"ContainerDied","Data":"bd89dffc20a0d0c96b40c40a92471914f9ee11b78e9c9c44b9b150003eb51c61"} Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.491859 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-54cf866f89-b4nzm" event={"ID":"a71f5c10-5dfd-4620-a5a9-e44593f90221","Type":"ContainerDied","Data":"c941265dddd263181596b8aa10a8cbb145a206fc78ba2d11e018dbdc52bb0091"} Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.491945 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-54cf866f89-b4nzm" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.501449 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-75945d9bdb-6szss"] Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.506359 4861 scope.go:117] "RemoveContainer" containerID="8ef9f15bbfd7eb47c5dfa5ff35831589423c4ce32b5278ea0afce2029e107196" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.514804 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-75945d9bdb-6szss"] Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.516967 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-sb\") pod \"c19001ec-52ca-4c24-8762-01590c5a843f\" (UID: \"c19001ec-52ca-4c24-8762-01590c5a843f\") " Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.517479 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.517494 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.517503 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cf2ln\" (UniqueName: \"kubernetes.io/projected/c19001ec-52ca-4c24-8762-01590c5a843f-kube-api-access-cf2ln\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.517513 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.531806 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/barbican-worker-54cf866f89-b4nzm"] Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.545576 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-54cf866f89-b4nzm"] Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.550226 4861 scope.go:117] "RemoveContainer" containerID="0cdb16ce2c168c436b331e4bbdf40084165870485570a89136f5fcf62a1ecb90" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.566405 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c19001ec-52ca-4c24-8762-01590c5a843f" (UID: "c19001ec-52ca-4c24-8762-01590c5a843f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.580936 4861 scope.go:117] "RemoveContainer" containerID="cb5a44707b0824360acb1f4a13b895be9162af2cd1d5933432c10c775e0ebeda" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.623601 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c19001ec-52ca-4c24-8762-01590c5a843f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.742971 4861 scope.go:117] "RemoveContainer" containerID="da2d08c17ba459b3791f3cea9d0813876562acc8611e2417dbf2b4c4c8859a0e" Jan 29 06:55:29 crc kubenswrapper[4861]: E0129 06:55:29.833928 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.883341 4861 scope.go:117] "RemoveContainer" containerID="1bd643bfd5a239a4c5d1cd11d56a7f00db10e323305ce5d1c290ad5a9bd08930" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.884148 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.918741 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-l64rb"] Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.930573 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-l64rb"] Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.930972 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dbpd\" (UniqueName: \"kubernetes.io/projected/c10aa65c-076c-4150-a573-d945be1b9c58-kube-api-access-7dbpd\") pod \"c10aa65c-076c-4150-a573-d945be1b9c58\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.931077 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data\") pod \"c10aa65c-076c-4150-a573-d945be1b9c58\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.931196 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c10aa65c-076c-4150-a573-d945be1b9c58-logs\") pod \"c10aa65c-076c-4150-a573-d945be1b9c58\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.931260 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-combined-ca-bundle\") pod \"c10aa65c-076c-4150-a573-d945be1b9c58\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.931293 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data-custom\") pod \"c10aa65c-076c-4150-a573-d945be1b9c58\" (UID: \"c10aa65c-076c-4150-a573-d945be1b9c58\") " Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.932386 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c10aa65c-076c-4150-a573-d945be1b9c58-logs" (OuterVolumeSpecName: "logs") pod "c10aa65c-076c-4150-a573-d945be1b9c58" (UID: "c10aa65c-076c-4150-a573-d945be1b9c58"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.935513 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c10aa65c-076c-4150-a573-d945be1b9c58-kube-api-access-7dbpd" (OuterVolumeSpecName: "kube-api-access-7dbpd") pod "c10aa65c-076c-4150-a573-d945be1b9c58" (UID: "c10aa65c-076c-4150-a573-d945be1b9c58"). InnerVolumeSpecName "kube-api-access-7dbpd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.937700 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c10aa65c-076c-4150-a573-d945be1b9c58" (UID: "c10aa65c-076c-4150-a573-d945be1b9c58"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.972459 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c10aa65c-076c-4150-a573-d945be1b9c58" (UID: "c10aa65c-076c-4150-a573-d945be1b9c58"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:29 crc kubenswrapper[4861]: I0129 06:55:29.987959 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data" (OuterVolumeSpecName: "config-data") pod "c10aa65c-076c-4150-a573-d945be1b9c58" (UID: "c10aa65c-076c-4150-a573-d945be1b9c58"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.034792 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.034818 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.034829 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dbpd\" (UniqueName: \"kubernetes.io/projected/c10aa65c-076c-4150-a573-d945be1b9c58-kube-api-access-7dbpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.034838 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10aa65c-076c-4150-a573-d945be1b9c58-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.034846 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c10aa65c-076c-4150-a573-d945be1b9c58-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.505810 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7de4c2d2-3468-4522-8f5d-5acf0d1c4806","Type":"ContainerStarted","Data":"3997a1b69f09fc7100f9ce9317788cbf9762d68f7035879be4de1fcc25b2c936"} Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.505933 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="ceilometer-notification-agent" containerID="cri-o://755619eeeb3119d470cf51a2e24d614ba425009925bd792e194519f0d1a42aaf" gracePeriod=30 Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.506014 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.506059 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="proxy-httpd" containerID="cri-o://3997a1b69f09fc7100f9ce9317788cbf9762d68f7035879be4de1fcc25b2c936" gracePeriod=30 Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.506191 4861 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openstack/ceilometer-0" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="sg-core" containerID="cri-o://adf5a98d31ec7921da169da60ea413217b543186b9c30f6c3c4531f922be78c6" gracePeriod=30 Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.524586 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5977db5ddd-9kzf9" event={"ID":"c10aa65c-076c-4150-a573-d945be1b9c58","Type":"ContainerDied","Data":"f3296b96d29dfc65ccdfd55871dd085f3ad9dca1b7dbc73ba3fba9fba5f8f5b9"} Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.524659 4861 scope.go:117] "RemoveContainer" containerID="4535d6cd1d0a07d42e521f2a195d9e02c782ba1eb93a277b271dfaa8a91266a1" Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.524840 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5977db5ddd-9kzf9" Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.540601 4861 generic.go:334] "Generic (PLEG): container finished" podID="8c6201c8-50a8-4b95-82e0-b944b78348d6" containerID="b54a873352b23c17ed2e2e12b008503e125d8e142b83ddde0068a40136cd6a56" exitCode=0 Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.540651 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-c5jkq" event={"ID":"8c6201c8-50a8-4b95-82e0-b944b78348d6","Type":"ContainerDied","Data":"b54a873352b23c17ed2e2e12b008503e125d8e142b83ddde0068a40136cd6a56"} Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.575797 4861 scope.go:117] "RemoveContainer" containerID="bd89dffc20a0d0c96b40c40a92471914f9ee11b78e9c9c44b9b150003eb51c61" Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.580186 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5977db5ddd-9kzf9"] Jan 29 06:55:30 crc kubenswrapper[4861]: I0129 06:55:30.586523 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5977db5ddd-9kzf9"] Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.127843 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a71f5c10-5dfd-4620-a5a9-e44593f90221" path="/var/lib/kubelet/pods/a71f5c10-5dfd-4620-a5a9-e44593f90221/volumes" Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.129135 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" path="/var/lib/kubelet/pods/c10aa65c-076c-4150-a573-d945be1b9c58/volumes" Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.129926 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c19001ec-52ca-4c24-8762-01590c5a843f" path="/var/lib/kubelet/pods/c19001ec-52ca-4c24-8762-01590c5a843f/volumes" Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.131406 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3e93bc3-461f-4bc0-ad61-f3c0d745f328" path="/var/lib/kubelet/pods/e3e93bc3-461f-4bc0-ad61-f3c0d745f328/volumes" Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.555568 4861 generic.go:334] "Generic (PLEG): container finished" podID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerID="3997a1b69f09fc7100f9ce9317788cbf9762d68f7035879be4de1fcc25b2c936" exitCode=0 Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.555598 4861 generic.go:334] "Generic (PLEG): container finished" podID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerID="adf5a98d31ec7921da169da60ea413217b543186b9c30f6c3c4531f922be78c6" exitCode=2 Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.555687 4861 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7de4c2d2-3468-4522-8f5d-5acf0d1c4806","Type":"ContainerDied","Data":"3997a1b69f09fc7100f9ce9317788cbf9762d68f7035879be4de1fcc25b2c936"} Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.555784 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7de4c2d2-3468-4522-8f5d-5acf0d1c4806","Type":"ContainerDied","Data":"adf5a98d31ec7921da169da60ea413217b543186b9c30f6c3c4531f922be78c6"} Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.886702 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.971538 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-config-data\") pod \"8c6201c8-50a8-4b95-82e0-b944b78348d6\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.971702 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-combined-ca-bundle\") pod \"8c6201c8-50a8-4b95-82e0-b944b78348d6\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.971739 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rv4cd\" (UniqueName: \"kubernetes.io/projected/8c6201c8-50a8-4b95-82e0-b944b78348d6-kube-api-access-rv4cd\") pod \"8c6201c8-50a8-4b95-82e0-b944b78348d6\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.971768 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c6201c8-50a8-4b95-82e0-b944b78348d6-etc-machine-id\") pod \"8c6201c8-50a8-4b95-82e0-b944b78348d6\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.971891 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-scripts\") pod \"8c6201c8-50a8-4b95-82e0-b944b78348d6\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.971995 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-db-sync-config-data\") pod \"8c6201c8-50a8-4b95-82e0-b944b78348d6\" (UID: \"8c6201c8-50a8-4b95-82e0-b944b78348d6\") " Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.972054 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8c6201c8-50a8-4b95-82e0-b944b78348d6-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "8c6201c8-50a8-4b95-82e0-b944b78348d6" (UID: "8c6201c8-50a8-4b95-82e0-b944b78348d6"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.972477 4861 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c6201c8-50a8-4b95-82e0-b944b78348d6-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.977427 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "8c6201c8-50a8-4b95-82e0-b944b78348d6" (UID: "8c6201c8-50a8-4b95-82e0-b944b78348d6"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.978613 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c6201c8-50a8-4b95-82e0-b944b78348d6-kube-api-access-rv4cd" (OuterVolumeSpecName: "kube-api-access-rv4cd") pod "8c6201c8-50a8-4b95-82e0-b944b78348d6" (UID: "8c6201c8-50a8-4b95-82e0-b944b78348d6"). InnerVolumeSpecName "kube-api-access-rv4cd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:31 crc kubenswrapper[4861]: I0129 06:55:31.980318 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-scripts" (OuterVolumeSpecName: "scripts") pod "8c6201c8-50a8-4b95-82e0-b944b78348d6" (UID: "8c6201c8-50a8-4b95-82e0-b944b78348d6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.016993 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8c6201c8-50a8-4b95-82e0-b944b78348d6" (UID: "8c6201c8-50a8-4b95-82e0-b944b78348d6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.053306 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-config-data" (OuterVolumeSpecName: "config-data") pod "8c6201c8-50a8-4b95-82e0-b944b78348d6" (UID: "8c6201c8-50a8-4b95-82e0-b944b78348d6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.074607 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.074785 4861 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.074869 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.074943 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c6201c8-50a8-4b95-82e0-b944b78348d6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.075016 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rv4cd\" (UniqueName: \"kubernetes.io/projected/8c6201c8-50a8-4b95-82e0-b944b78348d6-kube-api-access-rv4cd\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.569999 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-c5jkq" event={"ID":"8c6201c8-50a8-4b95-82e0-b944b78348d6","Type":"ContainerDied","Data":"5e62b0f2857da3bd0b6030be8d4998377760485d905fdb602e7adde9d648cbfb"} Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.570040 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e62b0f2857da3bd0b6030be8d4998377760485d905fdb602e7adde9d648cbfb" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.571030 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-c5jkq" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871026 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:55:32 crc kubenswrapper[4861]: E0129 06:55:32.871397 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a71f5c10-5dfd-4620-a5a9-e44593f90221" containerName="barbican-worker-log" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871413 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a71f5c10-5dfd-4620-a5a9-e44593f90221" containerName="barbican-worker-log" Jan 29 06:55:32 crc kubenswrapper[4861]: E0129 06:55:32.871428 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871435 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api" Jan 29 06:55:32 crc kubenswrapper[4861]: E0129 06:55:32.871451 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3e93bc3-461f-4bc0-ad61-f3c0d745f328" containerName="barbican-keystone-listener" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871457 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3e93bc3-461f-4bc0-ad61-f3c0d745f328" containerName="barbican-keystone-listener" Jan 29 06:55:32 crc kubenswrapper[4861]: E0129 06:55:32.871473 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c19001ec-52ca-4c24-8762-01590c5a843f" containerName="init" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871480 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c19001ec-52ca-4c24-8762-01590c5a843f" containerName="init" Jan 29 06:55:32 crc kubenswrapper[4861]: E0129 06:55:32.871490 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871496 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" Jan 29 06:55:32 crc kubenswrapper[4861]: E0129 06:55:32.871506 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3e93bc3-461f-4bc0-ad61-f3c0d745f328" containerName="barbican-keystone-listener-log" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871511 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3e93bc3-461f-4bc0-ad61-f3c0d745f328" containerName="barbican-keystone-listener-log" Jan 29 06:55:32 crc kubenswrapper[4861]: E0129 06:55:32.871522 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c6201c8-50a8-4b95-82e0-b944b78348d6" containerName="cinder-db-sync" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871527 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c6201c8-50a8-4b95-82e0-b944b78348d6" containerName="cinder-db-sync" Jan 29 06:55:32 crc kubenswrapper[4861]: E0129 06:55:32.871540 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c19001ec-52ca-4c24-8762-01590c5a843f" containerName="dnsmasq-dns" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871545 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c19001ec-52ca-4c24-8762-01590c5a843f" containerName="dnsmasq-dns" Jan 29 06:55:32 crc kubenswrapper[4861]: E0129 06:55:32.871555 4861 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="a71f5c10-5dfd-4620-a5a9-e44593f90221" containerName="barbican-worker" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871561 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a71f5c10-5dfd-4620-a5a9-e44593f90221" containerName="barbican-worker" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871723 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871736 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3e93bc3-461f-4bc0-ad61-f3c0d745f328" containerName="barbican-keystone-listener-log" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871752 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c19001ec-52ca-4c24-8762-01590c5a843f" containerName="dnsmasq-dns" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871760 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c6201c8-50a8-4b95-82e0-b944b78348d6" containerName="cinder-db-sync" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871771 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871780 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3e93bc3-461f-4bc0-ad61-f3c0d745f328" containerName="barbican-keystone-listener" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871790 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a71f5c10-5dfd-4620-a5a9-e44593f90221" containerName="barbican-worker-log" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871800 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a71f5c10-5dfd-4620-a5a9-e44593f90221" containerName="barbican-worker" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871807 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api" Jan 29 06:55:32 crc kubenswrapper[4861]: E0129 06:55:32.871958 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.871966 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.872666 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.889479 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.895571 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.895878 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.896020 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.896636 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-bhr6h" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.963751 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-b5m5w"] Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.972063 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.989322 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-b5m5w"] Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.998392 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.998494 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcl4c\" (UniqueName: \"kubernetes.io/projected/65644c46-5abc-41bb-8451-535ea55c6090-kube-api-access-mcl4c\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.998543 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.998581 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65644c46-5abc-41bb-8451-535ea55c6090-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.998607 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-scripts\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:32 crc kubenswrapper[4861]: I0129 06:55:32.998649 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.099755 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65644c46-5abc-41bb-8451-535ea55c6090-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.099813 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-scripts\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.099857 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-nb\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.099895 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.100054 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.100147 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-swift-storage-0\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.100212 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-config\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.100299 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7x5w\" (UniqueName: \"kubernetes.io/projected/4e650b78-7110-40bc-b59e-fdaea7ecc619-kube-api-access-m7x5w\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.100349 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcl4c\" (UniqueName: 
\"kubernetes.io/projected/65644c46-5abc-41bb-8451-535ea55c6090-kube-api-access-mcl4c\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.100421 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.100446 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-svc\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.100487 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-sb\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.100761 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65644c46-5abc-41bb-8451-535ea55c6090-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.106622 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.119219 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcl4c\" (UniqueName: \"kubernetes.io/projected/65644c46-5abc-41bb-8451-535ea55c6090-kube-api-access-mcl4c\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.119553 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-scripts\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.131039 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.131152 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data\") pod \"cinder-scheduler-0\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " pod="openstack/cinder-scheduler-0" Jan 29 
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.159944 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.162702 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.172788 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.178431 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.198398 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204619 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-swift-storage-0\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204659 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zp89\" (UniqueName: \"kubernetes.io/projected/467a8baa-1e95-4e23-b562-cc7901cfcbba-kube-api-access-8zp89\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204679 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-scripts\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204716 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-config\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204755 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204783 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7x5w\" (UniqueName: \"kubernetes.io/projected/4e650b78-7110-40bc-b59e-fdaea7ecc619-kube-api-access-m7x5w\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204827 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-svc\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204842 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data-custom\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204864 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-sb\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204879 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/467a8baa-1e95-4e23-b562-cc7901cfcbba-etc-machine-id\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204918 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/467a8baa-1e95-4e23-b562-cc7901cfcbba-logs\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204953 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-nb\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.204980 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.205903 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-sb\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.206168 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-nb\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.206323 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-swift-storage-0\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.206852 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-svc\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.207670 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-config\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.235294 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7x5w\" (UniqueName: \"kubernetes.io/projected/4e650b78-7110-40bc-b59e-fdaea7ecc619-kube-api-access-m7x5w\") pod \"dnsmasq-dns-75dbb546bf-b5m5w\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.288826 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.308655 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.308790 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zp89\" (UniqueName: \"kubernetes.io/projected/467a8baa-1e95-4e23-b562-cc7901cfcbba-kube-api-access-8zp89\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.308810 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-scripts\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.308851 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.308921 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data-custom\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.308955 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/467a8baa-1e95-4e23-b562-cc7901cfcbba-etc-machine-id\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.308976 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/467a8baa-1e95-4e23-b562-cc7901cfcbba-logs\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0"
\"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.309414 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/467a8baa-1e95-4e23-b562-cc7901cfcbba-logs\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.310246 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/467a8baa-1e95-4e23-b562-cc7901cfcbba-etc-machine-id\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.317059 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.322734 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data-custom\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.326490 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zp89\" (UniqueName: \"kubernetes.io/projected/467a8baa-1e95-4e23-b562-cc7901cfcbba-kube-api-access-8zp89\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.376009 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.398761 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-scripts\") pod \"cinder-api-0\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " pod="openstack/cinder-api-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.500147 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.774594 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:55:33 crc kubenswrapper[4861]: W0129 06:55:33.776659 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65644c46_5abc_41bb_8451_535ea55c6090.slice/crio-bf038990e249c7f22ec0f126f032513f1ed44643f80f86a50bcfc06cb98bb689 WatchSource:0}: Error finding container bf038990e249c7f22ec0f126f032513f1ed44643f80f86a50bcfc06cb98bb689: Status 404 returned error can't find the container with id bf038990e249c7f22ec0f126f032513f1ed44643f80f86a50bcfc06cb98bb689 Jan 29 06:55:33 crc kubenswrapper[4861]: W0129 06:55:33.968150 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod467a8baa_1e95_4e23_b562_cc7901cfcbba.slice/crio-c1d17bae8d94885a47e84fa56127ef711248919ea0114742d8e4e3e186a1420f WatchSource:0}: Error finding container c1d17bae8d94885a47e84fa56127ef711248919ea0114742d8e4e3e186a1420f: Status 404 returned error can't find the container with id c1d17bae8d94885a47e84fa56127ef711248919ea0114742d8e4e3e186a1420f Jan 29 06:55:33 crc kubenswrapper[4861]: I0129 06:55:33.968204 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.040211 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-b5m5w"] Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.592961 4861 generic.go:334] "Generic (PLEG): container finished" podID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerID="755619eeeb3119d470cf51a2e24d614ba425009925bd792e194519f0d1a42aaf" exitCode=0 Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.593020 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7de4c2d2-3468-4522-8f5d-5acf0d1c4806","Type":"ContainerDied","Data":"755619eeeb3119d470cf51a2e24d614ba425009925bd792e194519f0d1a42aaf"} Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.594956 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"467a8baa-1e95-4e23-b562-cc7901cfcbba","Type":"ContainerStarted","Data":"c1d17bae8d94885a47e84fa56127ef711248919ea0114742d8e4e3e186a1420f"} Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.599916 4861 generic.go:334] "Generic (PLEG): container finished" podID="4e650b78-7110-40bc-b59e-fdaea7ecc619" containerID="be60875a2de6cb8b593b08067130af18fa891776e7648f570ca9ab20a47f98b3" exitCode=0 Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.599972 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" event={"ID":"4e650b78-7110-40bc-b59e-fdaea7ecc619","Type":"ContainerDied","Data":"be60875a2de6cb8b593b08067130af18fa891776e7648f570ca9ab20a47f98b3"} Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.599994 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" event={"ID":"4e650b78-7110-40bc-b59e-fdaea7ecc619","Type":"ContainerStarted","Data":"f50725bc115507278db4f3b8d7af78365597fc1c209e3b81f486c538dfe8188b"} Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.607265 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"65644c46-5abc-41bb-8451-535ea55c6090","Type":"ContainerStarted","Data":"bf038990e249c7f22ec0f126f032513f1ed44643f80f86a50bcfc06cb98bb689"} Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.623527 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5977db5ddd-9kzf9" podUID="c10aa65c-076c-4150-a573-d945be1b9c58" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.690302 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.743475 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-combined-ca-bundle\") pod \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.743644 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-run-httpd\") pod \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.743669 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-config-data\") pod \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.743688 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mjjx5\" (UniqueName: \"kubernetes.io/projected/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-kube-api-access-mjjx5\") pod \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.743745 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-log-httpd\") pod \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.743851 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-sg-core-conf-yaml\") pod \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.743880 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-scripts\") pod \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\" (UID: \"7de4c2d2-3468-4522-8f5d-5acf0d1c4806\") " Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.744045 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7de4c2d2-3468-4522-8f5d-5acf0d1c4806" (UID: "7de4c2d2-3468-4522-8f5d-5acf0d1c4806"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.744172 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7de4c2d2-3468-4522-8f5d-5acf0d1c4806" (UID: "7de4c2d2-3468-4522-8f5d-5acf0d1c4806"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.744571 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.744592 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.747331 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-scripts" (OuterVolumeSpecName: "scripts") pod "7de4c2d2-3468-4522-8f5d-5acf0d1c4806" (UID: "7de4c2d2-3468-4522-8f5d-5acf0d1c4806"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.747592 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-kube-api-access-mjjx5" (OuterVolumeSpecName: "kube-api-access-mjjx5") pod "7de4c2d2-3468-4522-8f5d-5acf0d1c4806" (UID: "7de4c2d2-3468-4522-8f5d-5acf0d1c4806"). InnerVolumeSpecName "kube-api-access-mjjx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.799809 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7de4c2d2-3468-4522-8f5d-5acf0d1c4806" (UID: "7de4c2d2-3468-4522-8f5d-5acf0d1c4806"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.820014 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7de4c2d2-3468-4522-8f5d-5acf0d1c4806" (UID: "7de4c2d2-3468-4522-8f5d-5acf0d1c4806"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.846653 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.846688 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.846700 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mjjx5\" (UniqueName: \"kubernetes.io/projected/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-kube-api-access-mjjx5\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.846712 4861 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.857561 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-config-data" (OuterVolumeSpecName: "config-data") pod "7de4c2d2-3468-4522-8f5d-5acf0d1c4806" (UID: "7de4c2d2-3468-4522-8f5d-5acf0d1c4806"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:34 crc kubenswrapper[4861]: I0129 06:55:34.948583 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de4c2d2-3468-4522-8f5d-5acf0d1c4806-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.534205 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.625553 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"65644c46-5abc-41bb-8451-535ea55c6090","Type":"ContainerStarted","Data":"3852256dc818a21bdaae7cf50effd09612d29db5bc7c5aeb185e1cb8816e370b"} Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.632332 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7de4c2d2-3468-4522-8f5d-5acf0d1c4806","Type":"ContainerDied","Data":"ad6f3438c905e90b73f506c79f0dadb7c5fbbe874aed3ca2c0cff532317759b9"} Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.632390 4861 scope.go:117] "RemoveContainer" containerID="3997a1b69f09fc7100f9ce9317788cbf9762d68f7035879be4de1fcc25b2c936" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.632349 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.638967 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"467a8baa-1e95-4e23-b562-cc7901cfcbba","Type":"ContainerStarted","Data":"b54d2673047692ad6f1680306f53db0b6cce8f1cb90d91781c35d4abb5b55c2e"} Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.639014 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"467a8baa-1e95-4e23-b562-cc7901cfcbba","Type":"ContainerStarted","Data":"19e2a779a27878de26d39e43aff51b52e171b4239fe58edb48d8feb4f4b13271"} Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.639030 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.641790 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" event={"ID":"4e650b78-7110-40bc-b59e-fdaea7ecc619","Type":"ContainerStarted","Data":"5fe9c53e0fcd2cd94a63cb0cd318a1c2b1d8f53237727d05a34a0f21c8837e14"} Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.642079 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.681400 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.692201 4861 scope.go:117] "RemoveContainer" containerID="adf5a98d31ec7921da169da60ea413217b543186b9c30f6c3c4531f922be78c6" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.692798 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.722507 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=2.7224890840000002 podStartE2EDuration="2.722489084s" podCreationTimestamp="2026-01-29 06:55:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:35.704615948 +0000 UTC m=+1227.376110505" watchObservedRunningTime="2026-01-29 06:55:35.722489084 +0000 UTC m=+1227.393983641" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.746995 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:55:35 crc kubenswrapper[4861]: E0129 06:55:35.747459 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="ceilometer-notification-agent" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.747478 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="ceilometer-notification-agent" Jan 29 06:55:35 crc kubenswrapper[4861]: E0129 06:55:35.747498 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="sg-core" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.747505 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="sg-core" Jan 29 06:55:35 crc kubenswrapper[4861]: E0129 06:55:35.747524 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="proxy-httpd" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 
06:55:35.747550 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="proxy-httpd" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.747747 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="proxy-httpd" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.747765 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="ceilometer-notification-agent" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.747786 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" containerName="sg-core" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.749330 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.751684 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.751874 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.764701 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.780268 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" podStartSLOduration=3.78024962 podStartE2EDuration="3.78024962s" podCreationTimestamp="2026-01-29 06:55:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:35.728137188 +0000 UTC m=+1227.399631755" watchObservedRunningTime="2026-01-29 06:55:35.78024962 +0000 UTC m=+1227.451744187" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.839646 4861 scope.go:117] "RemoveContainer" containerID="755619eeeb3119d470cf51a2e24d614ba425009925bd792e194519f0d1a42aaf" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.869401 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-run-httpd\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.869474 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.869496 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-config-data\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.869550 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-sg-core-conf-yaml\") pod 
\"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.869593 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nf2p\" (UniqueName: \"kubernetes.io/projected/0e17948c-a091-4786-8319-f166892424e1-kube-api-access-5nf2p\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.869632 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-scripts\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.869655 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-log-httpd\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.972819 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.973998 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nf2p\" (UniqueName: \"kubernetes.io/projected/0e17948c-a091-4786-8319-f166892424e1-kube-api-access-5nf2p\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.974027 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-scripts\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.974097 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-log-httpd\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.974215 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-run-httpd\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.974290 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.974350 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-config-data\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.974687 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-log-httpd\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.974785 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-run-httpd\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.978735 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:35 crc kubenswrapper[4861]: I0129 06:55:35.980962 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-config-data\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:36 crc kubenswrapper[4861]: I0129 06:55:35.992103 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:36 crc kubenswrapper[4861]: I0129 06:55:35.994094 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-scripts\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:36 crc kubenswrapper[4861]: I0129 06:55:36.000947 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nf2p\" (UniqueName: \"kubernetes.io/projected/0e17948c-a091-4786-8319-f166892424e1-kube-api-access-5nf2p\") pod \"ceilometer-0\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " pod="openstack/ceilometer-0" Jan 29 06:55:36 crc kubenswrapper[4861]: I0129 06:55:36.110511 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:55:36 crc kubenswrapper[4861]: I0129 06:55:36.660955 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"65644c46-5abc-41bb-8451-535ea55c6090","Type":"ContainerStarted","Data":"793386bbb95d03061778ef7ebacfc530b90d37f58eb8c90daa9cee8be4d784a3"} Jan 29 06:55:36 crc kubenswrapper[4861]: I0129 06:55:36.661287 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="467a8baa-1e95-4e23-b562-cc7901cfcbba" containerName="cinder-api-log" containerID="cri-o://19e2a779a27878de26d39e43aff51b52e171b4239fe58edb48d8feb4f4b13271" gracePeriod=30 Jan 29 06:55:36 crc kubenswrapper[4861]: I0129 06:55:36.661322 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="467a8baa-1e95-4e23-b562-cc7901cfcbba" containerName="cinder-api" containerID="cri-o://b54d2673047692ad6f1680306f53db0b6cce8f1cb90d91781c35d4abb5b55c2e" gracePeriod=30 Jan 29 06:55:36 crc kubenswrapper[4861]: I0129 06:55:36.693598 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.933214218 podStartE2EDuration="4.69358395s" podCreationTimestamp="2026-01-29 06:55:32 +0000 UTC" firstStartedPulling="2026-01-29 06:55:33.780331804 +0000 UTC m=+1225.451826361" lastFinishedPulling="2026-01-29 06:55:34.540701536 +0000 UTC m=+1226.212196093" observedRunningTime="2026-01-29 06:55:36.692641416 +0000 UTC m=+1228.364135983" watchObservedRunningTime="2026-01-29 06:55:36.69358395 +0000 UTC m=+1228.365078507" Jan 29 06:55:36 crc kubenswrapper[4861]: I0129 06:55:36.725389 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.130801 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7de4c2d2-3468-4522-8f5d-5acf0d1c4806" path="/var/lib/kubelet/pods/7de4c2d2-3468-4522-8f5d-5acf0d1c4806/volumes" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.690094 4861 generic.go:334] "Generic (PLEG): container finished" podID="467a8baa-1e95-4e23-b562-cc7901cfcbba" containerID="b54d2673047692ad6f1680306f53db0b6cce8f1cb90d91781c35d4abb5b55c2e" exitCode=0 Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.690544 4861 generic.go:334] "Generic (PLEG): container finished" podID="467a8baa-1e95-4e23-b562-cc7901cfcbba" containerID="19e2a779a27878de26d39e43aff51b52e171b4239fe58edb48d8feb4f4b13271" exitCode=143 Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.690304 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"467a8baa-1e95-4e23-b562-cc7901cfcbba","Type":"ContainerDied","Data":"b54d2673047692ad6f1680306f53db0b6cce8f1cb90d91781c35d4abb5b55c2e"} Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.690643 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"467a8baa-1e95-4e23-b562-cc7901cfcbba","Type":"ContainerDied","Data":"19e2a779a27878de26d39e43aff51b52e171b4239fe58edb48d8feb4f4b13271"} Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.693512 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e17948c-a091-4786-8319-f166892424e1","Type":"ContainerStarted","Data":"0e895341b1ce6cf6d03f93c74e91d445498a819b791904138cf32b44a15d8b82"} Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.693538 4861 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e17948c-a091-4786-8319-f166892424e1","Type":"ContainerStarted","Data":"f12ecf8447c7ddaf5baa3c8037e02a2dff2e29f40a143ed88387236171676808"} Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.749703 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.810092 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data-custom\") pod \"467a8baa-1e95-4e23-b562-cc7901cfcbba\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.810142 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-scripts\") pod \"467a8baa-1e95-4e23-b562-cc7901cfcbba\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.810180 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/467a8baa-1e95-4e23-b562-cc7901cfcbba-logs\") pod \"467a8baa-1e95-4e23-b562-cc7901cfcbba\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.810240 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-combined-ca-bundle\") pod \"467a8baa-1e95-4e23-b562-cc7901cfcbba\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.810257 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/467a8baa-1e95-4e23-b562-cc7901cfcbba-etc-machine-id\") pod \"467a8baa-1e95-4e23-b562-cc7901cfcbba\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.810273 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data\") pod \"467a8baa-1e95-4e23-b562-cc7901cfcbba\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.810320 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zp89\" (UniqueName: \"kubernetes.io/projected/467a8baa-1e95-4e23-b562-cc7901cfcbba-kube-api-access-8zp89\") pod \"467a8baa-1e95-4e23-b562-cc7901cfcbba\" (UID: \"467a8baa-1e95-4e23-b562-cc7901cfcbba\") " Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.811560 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/467a8baa-1e95-4e23-b562-cc7901cfcbba-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "467a8baa-1e95-4e23-b562-cc7901cfcbba" (UID: "467a8baa-1e95-4e23-b562-cc7901cfcbba"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.811648 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/467a8baa-1e95-4e23-b562-cc7901cfcbba-logs" (OuterVolumeSpecName: "logs") pod "467a8baa-1e95-4e23-b562-cc7901cfcbba" (UID: "467a8baa-1e95-4e23-b562-cc7901cfcbba"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.816253 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "467a8baa-1e95-4e23-b562-cc7901cfcbba" (UID: "467a8baa-1e95-4e23-b562-cc7901cfcbba"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.816305 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-scripts" (OuterVolumeSpecName: "scripts") pod "467a8baa-1e95-4e23-b562-cc7901cfcbba" (UID: "467a8baa-1e95-4e23-b562-cc7901cfcbba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.816557 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/467a8baa-1e95-4e23-b562-cc7901cfcbba-kube-api-access-8zp89" (OuterVolumeSpecName: "kube-api-access-8zp89") pod "467a8baa-1e95-4e23-b562-cc7901cfcbba" (UID: "467a8baa-1e95-4e23-b562-cc7901cfcbba"). InnerVolumeSpecName "kube-api-access-8zp89". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.836935 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "467a8baa-1e95-4e23-b562-cc7901cfcbba" (UID: "467a8baa-1e95-4e23-b562-cc7901cfcbba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.866188 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data" (OuterVolumeSpecName: "config-data") pod "467a8baa-1e95-4e23-b562-cc7901cfcbba" (UID: "467a8baa-1e95-4e23-b562-cc7901cfcbba"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.912674 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.912708 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.912724 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/467a8baa-1e95-4e23-b562-cc7901cfcbba-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.912736 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.912747 4861 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/467a8baa-1e95-4e23-b562-cc7901cfcbba-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.912758 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/467a8baa-1e95-4e23-b562-cc7901cfcbba-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:37 crc kubenswrapper[4861]: I0129 06:55:37.912766 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zp89\" (UniqueName: \"kubernetes.io/projected/467a8baa-1e95-4e23-b562-cc7901cfcbba-kube-api-access-8zp89\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.198680 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 29 06:55:38 crc kubenswrapper[4861]: E0129 06:55:38.382278 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice/crio-ddf002d2b21cac7beb20b97fe76c1d0bda80dbc4238c13fb2244922405a64059\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice\": RecentStats: unable to find data in memory cache]" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.539509 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.733014 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.737009 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"467a8baa-1e95-4e23-b562-cc7901cfcbba","Type":"ContainerDied","Data":"c1d17bae8d94885a47e84fa56127ef711248919ea0114742d8e4e3e186a1420f"} Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.737121 4861 scope.go:117] "RemoveContainer" containerID="b54d2673047692ad6f1680306f53db0b6cce8f1cb90d91781c35d4abb5b55c2e" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.739175 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e17948c-a091-4786-8319-f166892424e1","Type":"ContainerStarted","Data":"ec43666028ab1955761002ca6a31c9bfc6f661f016a4f35f55b6514337545ae2"} Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.789264 4861 scope.go:117] "RemoveContainer" containerID="19e2a779a27878de26d39e43aff51b52e171b4239fe58edb48d8feb4f4b13271" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.796066 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6896cf67f5-ztfst"] Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.797176 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6896cf67f5-ztfst" podUID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerName="neutron-api" containerID="cri-o://6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50" gracePeriod=30 Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.801108 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6896cf67f5-ztfst" podUID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerName="neutron-httpd" containerID="cri-o://4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee" gracePeriod=30 Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.816272 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.829784 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-6896cf67f5-ztfst" podUID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.158:9696/\": read tcp 10.217.0.2:36916->10.217.0.158:9696: read: connection reset by peer" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.835713 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.857615 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7f868dbfb9-5bdsk"] Jan 29 06:55:38 crc kubenswrapper[4861]: E0129 06:55:38.858211 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="467a8baa-1e95-4e23-b562-cc7901cfcbba" containerName="cinder-api-log" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.858226 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="467a8baa-1e95-4e23-b562-cc7901cfcbba" containerName="cinder-api-log" Jan 29 06:55:38 crc kubenswrapper[4861]: E0129 06:55:38.858238 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="467a8baa-1e95-4e23-b562-cc7901cfcbba" containerName="cinder-api" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.858246 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="467a8baa-1e95-4e23-b562-cc7901cfcbba" containerName="cinder-api" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 
06:55:38.858493 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="467a8baa-1e95-4e23-b562-cc7901cfcbba" containerName="cinder-api-log" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.858525 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="467a8baa-1e95-4e23-b562-cc7901cfcbba" containerName="cinder-api" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.859680 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.873186 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f868dbfb9-5bdsk"] Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.887788 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.889252 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.892759 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.892867 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.892995 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.899239 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939011 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkh4z\" (UniqueName: \"kubernetes.io/projected/6c25bacb-4105-4fa4-a798-117f9cbe75fe-kube-api-access-jkh4z\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939049 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-public-tls-certs\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939089 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939142 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-public-tls-certs\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939165 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " 
pod="openstack/cinder-api-0" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939178 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkr9p\" (UniqueName: \"kubernetes.io/projected/ce4279f2-eded-42d5-9353-5235a6b7d64e-kube-api-access-bkr9p\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939204 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6c25bacb-4105-4fa4-a798-117f9cbe75fe-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939219 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-internal-tls-certs\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939241 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-ovndb-tls-certs\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939257 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939273 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939308 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data-custom\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939324 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939421 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-combined-ca-bundle\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " 
pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939440 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-scripts\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:38 crc kubenswrapper[4861]: I0129 06:55:38.939454 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c25bacb-4105-4fa4-a798-117f9cbe75fe-logs\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.041680 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkh4z\" (UniqueName: \"kubernetes.io/projected/6c25bacb-4105-4fa4-a798-117f9cbe75fe-kube-api-access-jkh4z\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.041982 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-public-tls-certs\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042013 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042060 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-public-tls-certs\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042097 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042116 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkr9p\" (UniqueName: \"kubernetes.io/projected/ce4279f2-eded-42d5-9353-5235a6b7d64e-kube-api-access-bkr9p\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042142 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6c25bacb-4105-4fa4-a798-117f9cbe75fe-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042158 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-internal-tls-certs\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042182 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-ovndb-tls-certs\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042198 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042217 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042255 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data-custom\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042272 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042298 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-combined-ca-bundle\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042318 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-scripts\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042332 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c25bacb-4105-4fa4-a798-117f9cbe75fe-logs\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.042712 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c25bacb-4105-4fa4-a798-117f9cbe75fe-logs\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.043007 4861 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6c25bacb-4105-4fa4-a798-117f9cbe75fe-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.046711 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-public-tls-certs\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.048234 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-internal-tls-certs\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.048595 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.049621 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data-custom\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.050503 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.050950 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-scripts\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.051282 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-combined-ca-bundle\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.053845 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-public-tls-certs\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.055563 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.058151 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-ovndb-tls-certs\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.058530 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.060277 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.060885 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkh4z\" (UniqueName: \"kubernetes.io/projected/6c25bacb-4105-4fa4-a798-117f9cbe75fe-kube-api-access-jkh4z\") pod \"cinder-api-0\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.061701 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkr9p\" (UniqueName: \"kubernetes.io/projected/ce4279f2-eded-42d5-9353-5235a6b7d64e-kube-api-access-bkr9p\") pod \"neutron-7f868dbfb9-5bdsk\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.135057 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="467a8baa-1e95-4e23-b562-cc7901cfcbba" path="/var/lib/kubelet/pods/467a8baa-1e95-4e23-b562-cc7901cfcbba/volumes" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.193270 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.215131 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.676453 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.759674 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e17948c-a091-4786-8319-f166892424e1","Type":"ContainerStarted","Data":"b839bbf835143eefafd14bb089237a16fb28f459121cfb5ea699317d4a8350a7"} Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.763056 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6c25bacb-4105-4fa4-a798-117f9cbe75fe","Type":"ContainerStarted","Data":"5c5c830c9d89a378027db88210b7e21af05dd3224886f16d3185b764989e2543"} Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.765218 4861 generic.go:334] "Generic (PLEG): container finished" podID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerID="4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee" exitCode=0 Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.765248 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6896cf67f5-ztfst" event={"ID":"b670e77a-b666-4fe3-bc2f-8ceb13c819a0","Type":"ContainerDied","Data":"4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee"} Jan 29 06:55:39 crc kubenswrapper[4861]: I0129 06:55:39.803041 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f868dbfb9-5bdsk"] Jan 29 06:55:39 crc kubenswrapper[4861]: W0129 06:55:39.806207 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce4279f2_eded_42d5_9353_5235a6b7d64e.slice/crio-78205a1a4450fceb7130e9998c3b6dbdfcca1d4c192cbe04db325a7a9943c942 WatchSource:0}: Error finding container 78205a1a4450fceb7130e9998c3b6dbdfcca1d4c192cbe04db325a7a9943c942: Status 404 returned error can't find the container with id 78205a1a4450fceb7130e9998c3b6dbdfcca1d4c192cbe04db325a7a9943c942 Jan 29 06:55:40 crc kubenswrapper[4861]: I0129 06:55:40.789252 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f868dbfb9-5bdsk" event={"ID":"ce4279f2-eded-42d5-9353-5235a6b7d64e","Type":"ContainerStarted","Data":"fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2"} Jan 29 06:55:40 crc kubenswrapper[4861]: I0129 06:55:40.789822 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f868dbfb9-5bdsk" event={"ID":"ce4279f2-eded-42d5-9353-5235a6b7d64e","Type":"ContainerStarted","Data":"ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e"} Jan 29 06:55:40 crc kubenswrapper[4861]: I0129 06:55:40.789833 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f868dbfb9-5bdsk" event={"ID":"ce4279f2-eded-42d5-9353-5235a6b7d64e","Type":"ContainerStarted","Data":"78205a1a4450fceb7130e9998c3b6dbdfcca1d4c192cbe04db325a7a9943c942"} Jan 29 06:55:40 crc kubenswrapper[4861]: I0129 06:55:40.790358 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:55:40 crc kubenswrapper[4861]: I0129 06:55:40.794370 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e17948c-a091-4786-8319-f166892424e1","Type":"ContainerStarted","Data":"b1181b835253eb664b853550944b522e7fcb43b8c1bd5a58de2f99221cc43a2c"} Jan 29 06:55:40 crc kubenswrapper[4861]: I0129 06:55:40.794869 4861 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 06:55:40 crc kubenswrapper[4861]: I0129 06:55:40.799401 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6c25bacb-4105-4fa4-a798-117f9cbe75fe","Type":"ContainerStarted","Data":"1040f23ce9435abf9d98fe86eda2bd1c172d7b64d769b973274d71099bd7ad84"} Jan 29 06:55:40 crc kubenswrapper[4861]: I0129 06:55:40.816394 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7f868dbfb9-5bdsk" podStartSLOduration=2.816379143 podStartE2EDuration="2.816379143s" podCreationTimestamp="2026-01-29 06:55:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:40.805983437 +0000 UTC m=+1232.477477994" watchObservedRunningTime="2026-01-29 06:55:40.816379143 +0000 UTC m=+1232.487873700" Jan 29 06:55:40 crc kubenswrapper[4861]: I0129 06:55:40.836978 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.120504266 podStartE2EDuration="5.836961629s" podCreationTimestamp="2026-01-29 06:55:35 +0000 UTC" firstStartedPulling="2026-01-29 06:55:36.73822573 +0000 UTC m=+1228.409720287" lastFinishedPulling="2026-01-29 06:55:40.454683093 +0000 UTC m=+1232.126177650" observedRunningTime="2026-01-29 06:55:40.832966866 +0000 UTC m=+1232.504461433" watchObservedRunningTime="2026-01-29 06:55:40.836961629 +0000 UTC m=+1232.508456186" Jan 29 06:55:41 crc kubenswrapper[4861]: I0129 06:55:41.652299 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-6896cf67f5-ztfst" podUID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.158:9696/\": dial tcp 10.217.0.158:9696: connect: connection refused" Jan 29 06:55:41 crc kubenswrapper[4861]: I0129 06:55:41.809838 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6c25bacb-4105-4fa4-a798-117f9cbe75fe","Type":"ContainerStarted","Data":"dfde65f746a284f0742bfc416b14134fe608d8f0ca69edc5ab5445ada8954bbe"} Jan 29 06:55:41 crc kubenswrapper[4861]: I0129 06:55:41.838938 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.838912264 podStartE2EDuration="3.838912264s" podCreationTimestamp="2026-01-29 06:55:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:41.834038338 +0000 UTC m=+1233.505532905" watchObservedRunningTime="2026-01-29 06:55:41.838912264 +0000 UTC m=+1233.510406821" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.432706 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.514097 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-combined-ca-bundle\") pod \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.514204 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-public-tls-certs\") pod \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.514258 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-config\") pod \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.514319 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-ovndb-tls-certs\") pod \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.515924 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-httpd-config\") pod \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.515974 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-internal-tls-certs\") pod \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.516035 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zs64\" (UniqueName: \"kubernetes.io/projected/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-kube-api-access-8zs64\") pod \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\" (UID: \"b670e77a-b666-4fe3-bc2f-8ceb13c819a0\") " Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.521226 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-kube-api-access-8zs64" (OuterVolumeSpecName: "kube-api-access-8zs64") pod "b670e77a-b666-4fe3-bc2f-8ceb13c819a0" (UID: "b670e77a-b666-4fe3-bc2f-8ceb13c819a0"). InnerVolumeSpecName "kube-api-access-8zs64". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.523058 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "b670e77a-b666-4fe3-bc2f-8ceb13c819a0" (UID: "b670e77a-b666-4fe3-bc2f-8ceb13c819a0"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.577514 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b670e77a-b666-4fe3-bc2f-8ceb13c819a0" (UID: "b670e77a-b666-4fe3-bc2f-8ceb13c819a0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.579336 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b670e77a-b666-4fe3-bc2f-8ceb13c819a0" (UID: "b670e77a-b666-4fe3-bc2f-8ceb13c819a0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.596330 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b670e77a-b666-4fe3-bc2f-8ceb13c819a0" (UID: "b670e77a-b666-4fe3-bc2f-8ceb13c819a0"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.600770 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "b670e77a-b666-4fe3-bc2f-8ceb13c819a0" (UID: "b670e77a-b666-4fe3-bc2f-8ceb13c819a0"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.611357 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-config" (OuterVolumeSpecName: "config") pod "b670e77a-b666-4fe3-bc2f-8ceb13c819a0" (UID: "b670e77a-b666-4fe3-bc2f-8ceb13c819a0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.618559 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.618690 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.618757 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zs64\" (UniqueName: \"kubernetes.io/projected/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-kube-api-access-8zs64\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.618833 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.618885 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.618942 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.619015 4861 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b670e77a-b666-4fe3-bc2f-8ceb13c819a0-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.832901 4861 generic.go:334] "Generic (PLEG): container finished" podID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerID="6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50" exitCode=0 Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.833018 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6896cf67f5-ztfst" event={"ID":"b670e77a-b666-4fe3-bc2f-8ceb13c819a0","Type":"ContainerDied","Data":"6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50"} Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.833036 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6896cf67f5-ztfst" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.833084 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6896cf67f5-ztfst" event={"ID":"b670e77a-b666-4fe3-bc2f-8ceb13c819a0","Type":"ContainerDied","Data":"eca500d9532c68f4405781394577ef6fbfd5ae83758e87b3864a35868b7052fb"} Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.833111 4861 scope.go:117] "RemoveContainer" containerID="4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.842287 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.880915 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6896cf67f5-ztfst"] Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.885827 4861 scope.go:117] "RemoveContainer" containerID="6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.892412 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6896cf67f5-ztfst"] Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.911472 4861 scope.go:117] "RemoveContainer" containerID="4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee" Jan 29 06:55:42 crc kubenswrapper[4861]: E0129 06:55:42.911895 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee\": container with ID starting with 4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee not found: ID does not exist" containerID="4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.911932 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee"} err="failed to get container status \"4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee\": rpc error: code = NotFound desc = could not find container \"4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee\": container with ID starting with 4b1280273880e4f42e14b8d890fbde95f1eeae06e75429108a8b70b76d8d57ee not found: ID does not exist" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.911963 4861 scope.go:117] "RemoveContainer" containerID="6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50" Jan 29 06:55:42 crc kubenswrapper[4861]: E0129 06:55:42.912325 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50\": container with ID starting with 6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50 not found: ID does not exist" containerID="6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50" Jan 29 06:55:42 crc kubenswrapper[4861]: I0129 06:55:42.912346 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50"} err="failed to get container status \"6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50\": rpc error: code = NotFound desc = could not find container 
\"6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50\": container with ID starting with 6d9bc0e1562ebea42c06d58c5036fe8d7ed5bc13c872f34dd8bc8ee3b634be50 not found: ID does not exist" Jan 29 06:55:43 crc kubenswrapper[4861]: I0129 06:55:43.131422 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" path="/var/lib/kubelet/pods/b670e77a-b666-4fe3-bc2f-8ceb13c819a0/volumes" Jan 29 06:55:43 crc kubenswrapper[4861]: I0129 06:55:43.290256 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" Jan 29 06:55:43 crc kubenswrapper[4861]: I0129 06:55:43.361691 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-98k9x"] Jan 29 06:55:43 crc kubenswrapper[4861]: I0129 06:55:43.361923 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" podUID="946bf576-dbb6-4284-9794-8330f6213430" containerName="dnsmasq-dns" containerID="cri-o://311e24d561d23d381f387cca73e093ca8ea2c8e85a82b5ae4b7550323de0fad0" gracePeriod=10 Jan 29 06:55:43 crc kubenswrapper[4861]: I0129 06:55:43.574868 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 29 06:55:43 crc kubenswrapper[4861]: I0129 06:55:43.699192 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:55:43 crc kubenswrapper[4861]: I0129 06:55:43.849539 4861 generic.go:334] "Generic (PLEG): container finished" podID="946bf576-dbb6-4284-9794-8330f6213430" containerID="311e24d561d23d381f387cca73e093ca8ea2c8e85a82b5ae4b7550323de0fad0" exitCode=0 Jan 29 06:55:43 crc kubenswrapper[4861]: I0129 06:55:43.849739 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" event={"ID":"946bf576-dbb6-4284-9794-8330f6213430","Type":"ContainerDied","Data":"311e24d561d23d381f387cca73e093ca8ea2c8e85a82b5ae4b7550323de0fad0"} Jan 29 06:55:43 crc kubenswrapper[4861]: I0129 06:55:43.861333 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="65644c46-5abc-41bb-8451-535ea55c6090" containerName="cinder-scheduler" containerID="cri-o://3852256dc818a21bdaae7cf50effd09612d29db5bc7c5aeb185e1cb8816e370b" gracePeriod=30 Jan 29 06:55:43 crc kubenswrapper[4861]: I0129 06:55:43.862695 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="65644c46-5abc-41bb-8451-535ea55c6090" containerName="probe" containerID="cri-o://793386bbb95d03061778ef7ebacfc530b90d37f58eb8c90daa9cee8be4d784a3" gracePeriod=30 Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.091853 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.155686 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-config\") pod \"946bf576-dbb6-4284-9794-8330f6213430\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.155776 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-nb\") pod \"946bf576-dbb6-4284-9794-8330f6213430\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.155815 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dsqq\" (UniqueName: \"kubernetes.io/projected/946bf576-dbb6-4284-9794-8330f6213430-kube-api-access-9dsqq\") pod \"946bf576-dbb6-4284-9794-8330f6213430\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.155834 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-swift-storage-0\") pod \"946bf576-dbb6-4284-9794-8330f6213430\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.162216 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/946bf576-dbb6-4284-9794-8330f6213430-kube-api-access-9dsqq" (OuterVolumeSpecName: "kube-api-access-9dsqq") pod "946bf576-dbb6-4284-9794-8330f6213430" (UID: "946bf576-dbb6-4284-9794-8330f6213430"). InnerVolumeSpecName "kube-api-access-9dsqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.236220 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "946bf576-dbb6-4284-9794-8330f6213430" (UID: "946bf576-dbb6-4284-9794-8330f6213430"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.243561 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-config" (OuterVolumeSpecName: "config") pod "946bf576-dbb6-4284-9794-8330f6213430" (UID: "946bf576-dbb6-4284-9794-8330f6213430"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.256771 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-svc\") pod \"946bf576-dbb6-4284-9794-8330f6213430\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.256948 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-sb\") pod \"946bf576-dbb6-4284-9794-8330f6213430\" (UID: \"946bf576-dbb6-4284-9794-8330f6213430\") " Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.257189 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.257211 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dsqq\" (UniqueName: \"kubernetes.io/projected/946bf576-dbb6-4284-9794-8330f6213430-kube-api-access-9dsqq\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.257221 4861 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.296356 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "946bf576-dbb6-4284-9794-8330f6213430" (UID: "946bf576-dbb6-4284-9794-8330f6213430"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.303349 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "946bf576-dbb6-4284-9794-8330f6213430" (UID: "946bf576-dbb6-4284-9794-8330f6213430"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.317279 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "946bf576-dbb6-4284-9794-8330f6213430" (UID: "946bf576-dbb6-4284-9794-8330f6213430"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.358377 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.358416 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.358426 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/946bf576-dbb6-4284-9794-8330f6213430-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.870355 4861 generic.go:334] "Generic (PLEG): container finished" podID="65644c46-5abc-41bb-8451-535ea55c6090" containerID="793386bbb95d03061778ef7ebacfc530b90d37f58eb8c90daa9cee8be4d784a3" exitCode=0 Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.870636 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"65644c46-5abc-41bb-8451-535ea55c6090","Type":"ContainerDied","Data":"793386bbb95d03061778ef7ebacfc530b90d37f58eb8c90daa9cee8be4d784a3"} Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.872279 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" event={"ID":"946bf576-dbb6-4284-9794-8330f6213430","Type":"ContainerDied","Data":"fc391acc8982161c95293203473109aa3f114ee676eef921f6aa4da214bb8ec0"} Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.872311 4861 scope.go:117] "RemoveContainer" containerID="311e24d561d23d381f387cca73e093ca8ea2c8e85a82b5ae4b7550323de0fad0" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.872429 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66cdd4b5b5-98k9x" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.902875 4861 scope.go:117] "RemoveContainer" containerID="96a5f8f30b42d1d9a073bc7865d209c3c2028cf79d35e54a60c32d5021bed97e" Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.920118 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-98k9x"] Jan 29 06:55:44 crc kubenswrapper[4861]: I0129 06:55:44.928405 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-98k9x"] Jan 29 06:55:45 crc kubenswrapper[4861]: I0129 06:55:45.126666 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="946bf576-dbb6-4284-9794-8330f6213430" path="/var/lib/kubelet/pods/946bf576-dbb6-4284-9794-8330f6213430/volumes" Jan 29 06:55:45 crc kubenswrapper[4861]: I0129 06:55:45.365179 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5b7fd56548-f8c7z" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.670146 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 29 06:55:46 crc kubenswrapper[4861]: E0129 06:55:46.670837 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerName="neutron-httpd" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.670851 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerName="neutron-httpd" Jan 29 06:55:46 crc kubenswrapper[4861]: E0129 06:55:46.670872 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="946bf576-dbb6-4284-9794-8330f6213430" containerName="init" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.670878 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="946bf576-dbb6-4284-9794-8330f6213430" containerName="init" Jan 29 06:55:46 crc kubenswrapper[4861]: E0129 06:55:46.670903 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="946bf576-dbb6-4284-9794-8330f6213430" containerName="dnsmasq-dns" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.670910 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="946bf576-dbb6-4284-9794-8330f6213430" containerName="dnsmasq-dns" Jan 29 06:55:46 crc kubenswrapper[4861]: E0129 06:55:46.670919 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerName="neutron-api" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.670924 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerName="neutron-api" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.671117 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="946bf576-dbb6-4284-9794-8330f6213430" containerName="dnsmasq-dns" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.671135 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerName="neutron-httpd" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.671149 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b670e77a-b666-4fe3-bc2f-8ceb13c819a0" containerName="neutron-api" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.671837 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.680811 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.681166 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-hq7gw" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.681336 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.683501 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.711157 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config-secret\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.711205 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmqzt\" (UniqueName: \"kubernetes.io/projected/4c1315db-486b-4b63-bdb0-630c247d49b4-kube-api-access-mmqzt\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.711222 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.711240 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.832058 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config-secret\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.832135 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmqzt\" (UniqueName: \"kubernetes.io/projected/4c1315db-486b-4b63-bdb0-630c247d49b4-kube-api-access-mmqzt\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.832159 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.832178 4861 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.834130 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.849590 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config-secret\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.850811 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.858346 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmqzt\" (UniqueName: \"kubernetes.io/projected/4c1315db-486b-4b63-bdb0-630c247d49b4-kube-api-access-mmqzt\") pod \"openstackclient\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " pod="openstack/openstackclient" Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.915697 4861 generic.go:334] "Generic (PLEG): container finished" podID="65644c46-5abc-41bb-8451-535ea55c6090" containerID="3852256dc818a21bdaae7cf50effd09612d29db5bc7c5aeb185e1cb8816e370b" exitCode=0 Jan 29 06:55:46 crc kubenswrapper[4861]: I0129 06:55:46.915733 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"65644c46-5abc-41bb-8451-535ea55c6090","Type":"ContainerDied","Data":"3852256dc818a21bdaae7cf50effd09612d29db5bc7c5aeb185e1cb8816e370b"} Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.043258 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.058246 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.248602 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data\") pod \"65644c46-5abc-41bb-8451-535ea55c6090\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.248767 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65644c46-5abc-41bb-8451-535ea55c6090-etc-machine-id\") pod \"65644c46-5abc-41bb-8451-535ea55c6090\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.248822 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-combined-ca-bundle\") pod \"65644c46-5abc-41bb-8451-535ea55c6090\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.248901 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data-custom\") pod \"65644c46-5abc-41bb-8451-535ea55c6090\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.248928 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-scripts\") pod \"65644c46-5abc-41bb-8451-535ea55c6090\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.248963 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcl4c\" (UniqueName: \"kubernetes.io/projected/65644c46-5abc-41bb-8451-535ea55c6090-kube-api-access-mcl4c\") pod \"65644c46-5abc-41bb-8451-535ea55c6090\" (UID: \"65644c46-5abc-41bb-8451-535ea55c6090\") " Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.255341 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65644c46-5abc-41bb-8451-535ea55c6090-kube-api-access-mcl4c" (OuterVolumeSpecName: "kube-api-access-mcl4c") pod "65644c46-5abc-41bb-8451-535ea55c6090" (UID: "65644c46-5abc-41bb-8451-535ea55c6090"). InnerVolumeSpecName "kube-api-access-mcl4c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.256614 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/65644c46-5abc-41bb-8451-535ea55c6090-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "65644c46-5abc-41bb-8451-535ea55c6090" (UID: "65644c46-5abc-41bb-8451-535ea55c6090"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.260928 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "65644c46-5abc-41bb-8451-535ea55c6090" (UID: "65644c46-5abc-41bb-8451-535ea55c6090"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.266232 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-scripts" (OuterVolumeSpecName: "scripts") pod "65644c46-5abc-41bb-8451-535ea55c6090" (UID: "65644c46-5abc-41bb-8451-535ea55c6090"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.338817 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "65644c46-5abc-41bb-8451-535ea55c6090" (UID: "65644c46-5abc-41bb-8451-535ea55c6090"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.350550 4861 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65644c46-5abc-41bb-8451-535ea55c6090-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.350579 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.350614 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.350623 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.350632 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcl4c\" (UniqueName: \"kubernetes.io/projected/65644c46-5abc-41bb-8451-535ea55c6090-kube-api-access-mcl4c\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.441684 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data" (OuterVolumeSpecName: "config-data") pod "65644c46-5abc-41bb-8451-535ea55c6090" (UID: "65644c46-5abc-41bb-8451-535ea55c6090"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.451791 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65644c46-5abc-41bb-8451-535ea55c6090-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.558017 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.930125 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"4c1315db-486b-4b63-bdb0-630c247d49b4","Type":"ContainerStarted","Data":"fc2bbb059de5c4ef9ed9a5d96a62b5e75f985f191606ea34761122b77ba82e7f"} Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.933653 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"65644c46-5abc-41bb-8451-535ea55c6090","Type":"ContainerDied","Data":"bf038990e249c7f22ec0f126f032513f1ed44643f80f86a50bcfc06cb98bb689"} Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.933698 4861 scope.go:117] "RemoveContainer" containerID="793386bbb95d03061778ef7ebacfc530b90d37f58eb8c90daa9cee8be4d784a3" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.933852 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.986196 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:55:47 crc kubenswrapper[4861]: I0129 06:55:47.998595 4861 scope.go:117] "RemoveContainer" containerID="3852256dc818a21bdaae7cf50effd09612d29db5bc7c5aeb185e1cb8816e370b" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.008177 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.020382 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:55:48 crc kubenswrapper[4861]: E0129 06:55:48.020768 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65644c46-5abc-41bb-8451-535ea55c6090" containerName="probe" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.020784 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="65644c46-5abc-41bb-8451-535ea55c6090" containerName="probe" Jan 29 06:55:48 crc kubenswrapper[4861]: E0129 06:55:48.020813 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65644c46-5abc-41bb-8451-535ea55c6090" containerName="cinder-scheduler" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.020820 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="65644c46-5abc-41bb-8451-535ea55c6090" containerName="cinder-scheduler" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.020989 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="65644c46-5abc-41bb-8451-535ea55c6090" containerName="probe" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.021009 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="65644c46-5abc-41bb-8451-535ea55c6090" containerName="cinder-scheduler" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.021871 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.046812 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.049353 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.168115 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.168305 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.168458 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.168932 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6v9x\" (UniqueName: \"kubernetes.io/projected/b1c31da3-c703-4d07-82e5-b02fe841a548-kube-api-access-c6v9x\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.168962 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-scripts\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.169044 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b1c31da3-c703-4d07-82e5-b02fe841a548-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.270625 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.270715 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.270835 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-c6v9x\" (UniqueName: \"kubernetes.io/projected/b1c31da3-c703-4d07-82e5-b02fe841a548-kube-api-access-c6v9x\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.270861 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-scripts\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.270936 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b1c31da3-c703-4d07-82e5-b02fe841a548-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.270977 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.273396 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b1c31da3-c703-4d07-82e5-b02fe841a548-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.278961 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.279000 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-scripts\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.279598 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.289961 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.291448 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6v9x\" (UniqueName: \"kubernetes.io/projected/b1c31da3-c703-4d07-82e5-b02fe841a548-kube-api-access-c6v9x\") pod \"cinder-scheduler-0\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") " pod="openstack/cinder-scheduler-0" Jan 29 
06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.366694 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 06:55:48 crc kubenswrapper[4861]: E0129 06:55:48.621615 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice/crio-ddf002d2b21cac7beb20b97fe76c1d0bda80dbc4238c13fb2244922405a64059\": RecentStats: unable to find data in memory cache]" Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.667968 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:55:48 crc kubenswrapper[4861]: I0129 06:55:48.944185 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b1c31da3-c703-4d07-82e5-b02fe841a548","Type":"ContainerStarted","Data":"b1a329988008787d1cfa716266a509dd11d47339ff28b145e6b29bdaaf290f98"} Jan 29 06:55:49 crc kubenswrapper[4861]: I0129 06:55:49.126133 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65644c46-5abc-41bb-8451-535ea55c6090" path="/var/lib/kubelet/pods/65644c46-5abc-41bb-8451-535ea55c6090/volumes" Jan 29 06:55:49 crc kubenswrapper[4861]: I0129 06:55:49.970111 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b1c31da3-c703-4d07-82e5-b02fe841a548","Type":"ContainerStarted","Data":"9fd75cdc74ab09059f227b09ddff0f8e7c83f3521d26bb3444075ede07ae852b"} Jan 29 06:55:49 crc kubenswrapper[4861]: I0129 06:55:49.970435 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b1c31da3-c703-4d07-82e5-b02fe841a548","Type":"ContainerStarted","Data":"fd9675094dbc5e4671db27d2d11399c30b5682eca9316dcb9802ca14217ef8f4"} Jan 29 06:55:49 crc kubenswrapper[4861]: I0129 06:55:49.990883 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=2.990861382 podStartE2EDuration="2.990861382s" podCreationTimestamp="2026-01-29 06:55:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:55:49.986121549 +0000 UTC m=+1241.657616116" watchObservedRunningTime="2026-01-29 06:55:49.990861382 +0000 UTC m=+1241.662355949" Jan 29 06:55:51 crc kubenswrapper[4861]: I0129 06:55:51.170550 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 29 06:55:51 crc kubenswrapper[4861]: I0129 06:55:51.930198 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:55:51 crc kubenswrapper[4861]: I0129 06:55:51.930742 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="ceilometer-central-agent" containerID="cri-o://0e895341b1ce6cf6d03f93c74e91d445498a819b791904138cf32b44a15d8b82" gracePeriod=30 Jan 29 06:55:51 crc kubenswrapper[4861]: I0129 06:55:51.930800 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="proxy-httpd" 
containerID="cri-o://b1181b835253eb664b853550944b522e7fcb43b8c1bd5a58de2f99221cc43a2c" gracePeriod=30 Jan 29 06:55:51 crc kubenswrapper[4861]: I0129 06:55:51.930847 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="sg-core" containerID="cri-o://b839bbf835143eefafd14bb089237a16fb28f459121cfb5ea699317d4a8350a7" gracePeriod=30 Jan 29 06:55:51 crc kubenswrapper[4861]: I0129 06:55:51.930858 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="ceilometer-notification-agent" containerID="cri-o://ec43666028ab1955761002ca6a31c9bfc6f661f016a4f35f55b6514337545ae2" gracePeriod=30 Jan 29 06:55:51 crc kubenswrapper[4861]: I0129 06:55:51.935917 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.455934 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-58dc7dd48c-t4mkl"] Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.457403 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.462275 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.462493 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.471944 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.480162 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-58dc7dd48c-t4mkl"] Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.556365 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-public-tls-certs\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.556438 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-log-httpd\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.556476 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-internal-tls-certs\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.556497 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-config-data\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " 
pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.556515 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-run-httpd\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.556543 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-combined-ca-bundle\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.556581 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-etc-swift\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.556609 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2x47v\" (UniqueName: \"kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-kube-api-access-2x47v\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.658375 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-public-tls-certs\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.658726 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-log-httpd\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.658809 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-internal-tls-certs\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.658866 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-config-data\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.658895 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-run-httpd\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " 
pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.658973 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-combined-ca-bundle\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.659019 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-etc-swift\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.659060 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2x47v\" (UniqueName: \"kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-kube-api-access-2x47v\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.659603 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-log-httpd\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.665450 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-internal-tls-certs\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.668238 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-run-httpd\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.668732 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-combined-ca-bundle\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.669245 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-config-data\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.674118 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-public-tls-certs\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.680790 
4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2x47v\" (UniqueName: \"kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-kube-api-access-2x47v\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.682444 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-etc-swift\") pod \"swift-proxy-58dc7dd48c-t4mkl\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:52 crc kubenswrapper[4861]: I0129 06:55:52.775346 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:55:53 crc kubenswrapper[4861]: I0129 06:55:53.016759 4861 generic.go:334] "Generic (PLEG): container finished" podID="0e17948c-a091-4786-8319-f166892424e1" containerID="b1181b835253eb664b853550944b522e7fcb43b8c1bd5a58de2f99221cc43a2c" exitCode=0 Jan 29 06:55:53 crc kubenswrapper[4861]: I0129 06:55:53.017014 4861 generic.go:334] "Generic (PLEG): container finished" podID="0e17948c-a091-4786-8319-f166892424e1" containerID="b839bbf835143eefafd14bb089237a16fb28f459121cfb5ea699317d4a8350a7" exitCode=2 Jan 29 06:55:53 crc kubenswrapper[4861]: I0129 06:55:53.017025 4861 generic.go:334] "Generic (PLEG): container finished" podID="0e17948c-a091-4786-8319-f166892424e1" containerID="0e895341b1ce6cf6d03f93c74e91d445498a819b791904138cf32b44a15d8b82" exitCode=0 Jan 29 06:55:53 crc kubenswrapper[4861]: I0129 06:55:53.017060 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e17948c-a091-4786-8319-f166892424e1","Type":"ContainerDied","Data":"b1181b835253eb664b853550944b522e7fcb43b8c1bd5a58de2f99221cc43a2c"} Jan 29 06:55:53 crc kubenswrapper[4861]: I0129 06:55:53.017129 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e17948c-a091-4786-8319-f166892424e1","Type":"ContainerDied","Data":"b839bbf835143eefafd14bb089237a16fb28f459121cfb5ea699317d4a8350a7"} Jan 29 06:55:53 crc kubenswrapper[4861]: I0129 06:55:53.017164 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e17948c-a091-4786-8319-f166892424e1","Type":"ContainerDied","Data":"0e895341b1ce6cf6d03f93c74e91d445498a819b791904138cf32b44a15d8b82"} Jan 29 06:55:53 crc kubenswrapper[4861]: I0129 06:55:53.342238 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-58dc7dd48c-t4mkl"] Jan 29 06:55:53 crc kubenswrapper[4861]: I0129 06:55:53.367187 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 29 06:55:53 crc kubenswrapper[4861]: I0129 06:55:53.776280 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:53 crc kubenswrapper[4861]: I0129 06:55:53.799194 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6957679874-pnq22" Jan 29 06:55:54 crc kubenswrapper[4861]: I0129 06:55:54.031059 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" 
event={"ID":"1aa0d0b6-7731-421f-ac34-43cfd70e808c","Type":"ContainerStarted","Data":"6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd"} Jan 29 06:55:54 crc kubenswrapper[4861]: I0129 06:55:54.031106 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" event={"ID":"1aa0d0b6-7731-421f-ac34-43cfd70e808c","Type":"ContainerStarted","Data":"bd42d5dd4244b8ae1a37a2d77ea5c3e74a44a32ed9d07c672f19264bf349ac12"} Jan 29 06:55:54 crc kubenswrapper[4861]: I0129 06:55:54.782971 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:55:54 crc kubenswrapper[4861]: I0129 06:55:54.783567 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" containerName="glance-log" containerID="cri-o://830b8ad78a9cb455d1b976b843a3c03005c610d03cc564242cb5c13c4c9476d0" gracePeriod=30 Jan 29 06:55:54 crc kubenswrapper[4861]: I0129 06:55:54.783667 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" containerName="glance-httpd" containerID="cri-o://3ebe082356931eb76e7562873162d36ba549bea59f8d99bc8ac0a4cfd7a0f83a" gracePeriod=30 Jan 29 06:55:55 crc kubenswrapper[4861]: I0129 06:55:55.040314 4861 generic.go:334] "Generic (PLEG): container finished" podID="0e17948c-a091-4786-8319-f166892424e1" containerID="ec43666028ab1955761002ca6a31c9bfc6f661f016a4f35f55b6514337545ae2" exitCode=0 Jan 29 06:55:55 crc kubenswrapper[4861]: I0129 06:55:55.040363 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e17948c-a091-4786-8319-f166892424e1","Type":"ContainerDied","Data":"ec43666028ab1955761002ca6a31c9bfc6f661f016a4f35f55b6514337545ae2"} Jan 29 06:55:55 crc kubenswrapper[4861]: I0129 06:55:55.041962 4861 generic.go:334] "Generic (PLEG): container finished" podID="ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" containerID="830b8ad78a9cb455d1b976b843a3c03005c610d03cc564242cb5c13c4c9476d0" exitCode=143 Jan 29 06:55:55 crc kubenswrapper[4861]: I0129 06:55:55.041992 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b","Type":"ContainerDied","Data":"830b8ad78a9cb455d1b976b843a3c03005c610d03cc564242cb5c13c4c9476d0"} Jan 29 06:55:55 crc kubenswrapper[4861]: I0129 06:55:55.996614 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-9g56k"] Jan 29 06:55:55 crc kubenswrapper[4861]: I0129 06:55:55.997927 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-9g56k" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.011318 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-9g56k"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.104740 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-9wjtc"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.106123 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-9wjtc" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.119607 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d56467d-6815-4f11-9b52-06cd32a818ab-operator-scripts\") pod \"nova-api-db-create-9g56k\" (UID: \"9d56467d-6815-4f11-9b52-06cd32a818ab\") " pod="openstack/nova-api-db-create-9g56k" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.119658 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drvqt\" (UniqueName: \"kubernetes.io/projected/9d56467d-6815-4f11-9b52-06cd32a818ab-kube-api-access-drvqt\") pod \"nova-api-db-create-9g56k\" (UID: \"9d56467d-6815-4f11-9b52-06cd32a818ab\") " pod="openstack/nova-api-db-create-9g56k" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.142027 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-9wjtc"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.153441 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-733f-account-create-update-rmh9j"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.154506 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-733f-account-create-update-rmh9j" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.161305 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.185800 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-733f-account-create-update-rmh9j"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.214628 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.214894 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4bf22b0a-5a8f-4f18-b259-8c43fac759dd" containerName="glance-log" containerID="cri-o://a37bdb21623a422cbac5028522ac71470fcf73cee302efbb609b5d8966142036" gracePeriod=30 Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.215026 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4bf22b0a-5a8f-4f18-b259-8c43fac759dd" containerName="glance-httpd" containerID="cri-o://0b5e0b509d93b270a78efdf4453478a55b8536121a28cd51c081d4a2b3fbd06b" gracePeriod=30 Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.225302 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jf79s\" (UniqueName: \"kubernetes.io/projected/25104968-4e44-41eb-be19-a88e17384e57-kube-api-access-jf79s\") pod \"nova-cell0-db-create-9wjtc\" (UID: \"25104968-4e44-41eb-be19-a88e17384e57\") " pod="openstack/nova-cell0-db-create-9wjtc" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.225595 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25104968-4e44-41eb-be19-a88e17384e57-operator-scripts\") pod \"nova-cell0-db-create-9wjtc\" (UID: \"25104968-4e44-41eb-be19-a88e17384e57\") " pod="openstack/nova-cell0-db-create-9wjtc" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.225789 4861 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d56467d-6815-4f11-9b52-06cd32a818ab-operator-scripts\") pod \"nova-api-db-create-9g56k\" (UID: \"9d56467d-6815-4f11-9b52-06cd32a818ab\") " pod="openstack/nova-api-db-create-9g56k" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.225870 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drvqt\" (UniqueName: \"kubernetes.io/projected/9d56467d-6815-4f11-9b52-06cd32a818ab-kube-api-access-drvqt\") pod \"nova-api-db-create-9g56k\" (UID: \"9d56467d-6815-4f11-9b52-06cd32a818ab\") " pod="openstack/nova-api-db-create-9g56k" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.226854 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d56467d-6815-4f11-9b52-06cd32a818ab-operator-scripts\") pod \"nova-api-db-create-9g56k\" (UID: \"9d56467d-6815-4f11-9b52-06cd32a818ab\") " pod="openstack/nova-api-db-create-9g56k" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.243645 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drvqt\" (UniqueName: \"kubernetes.io/projected/9d56467d-6815-4f11-9b52-06cd32a818ab-kube-api-access-drvqt\") pod \"nova-api-db-create-9g56k\" (UID: \"9d56467d-6815-4f11-9b52-06cd32a818ab\") " pod="openstack/nova-api-db-create-9g56k" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.301162 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-g6s94"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.302283 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-g6s94" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.317503 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-9g56k" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.340173 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25104968-4e44-41eb-be19-a88e17384e57-operator-scripts\") pod \"nova-cell0-db-create-9wjtc\" (UID: \"25104968-4e44-41eb-be19-a88e17384e57\") " pod="openstack/nova-cell0-db-create-9wjtc" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.340381 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzwjw\" (UniqueName: \"kubernetes.io/projected/a0631735-7992-4ca9-8564-2fb8c223a266-kube-api-access-qzwjw\") pod \"nova-api-733f-account-create-update-rmh9j\" (UID: \"a0631735-7992-4ca9-8564-2fb8c223a266\") " pod="openstack/nova-api-733f-account-create-update-rmh9j" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.340498 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0631735-7992-4ca9-8564-2fb8c223a266-operator-scripts\") pod \"nova-api-733f-account-create-update-rmh9j\" (UID: \"a0631735-7992-4ca9-8564-2fb8c223a266\") " pod="openstack/nova-api-733f-account-create-update-rmh9j" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.340566 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jf79s\" (UniqueName: \"kubernetes.io/projected/25104968-4e44-41eb-be19-a88e17384e57-kube-api-access-jf79s\") pod \"nova-cell0-db-create-9wjtc\" (UID: \"25104968-4e44-41eb-be19-a88e17384e57\") " pod="openstack/nova-cell0-db-create-9wjtc" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.343796 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25104968-4e44-41eb-be19-a88e17384e57-operator-scripts\") pod \"nova-cell0-db-create-9wjtc\" (UID: \"25104968-4e44-41eb-be19-a88e17384e57\") " pod="openstack/nova-cell0-db-create-9wjtc" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.360033 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-1f01-account-create-update-4jqs4"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.360663 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jf79s\" (UniqueName: \"kubernetes.io/projected/25104968-4e44-41eb-be19-a88e17384e57-kube-api-access-jf79s\") pod \"nova-cell0-db-create-9wjtc\" (UID: \"25104968-4e44-41eb-be19-a88e17384e57\") " pod="openstack/nova-cell0-db-create-9wjtc" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.362660 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.365223 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.381228 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-g6s94"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.392927 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1f01-account-create-update-4jqs4"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.438681 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-9wjtc" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.444215 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e04235da-615a-4098-9192-66537e3d2c8b-operator-scripts\") pod \"nova-cell0-1f01-account-create-update-4jqs4\" (UID: \"e04235da-615a-4098-9192-66537e3d2c8b\") " pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.444274 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-994cl\" (UniqueName: \"kubernetes.io/projected/1ed744f8-d389-4054-8bce-7a1f7b1be71f-kube-api-access-994cl\") pod \"nova-cell1-db-create-g6s94\" (UID: \"1ed744f8-d389-4054-8bce-7a1f7b1be71f\") " pod="openstack/nova-cell1-db-create-g6s94" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.444322 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ed744f8-d389-4054-8bce-7a1f7b1be71f-operator-scripts\") pod \"nova-cell1-db-create-g6s94\" (UID: \"1ed744f8-d389-4054-8bce-7a1f7b1be71f\") " pod="openstack/nova-cell1-db-create-g6s94" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.444416 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x4m8\" (UniqueName: \"kubernetes.io/projected/e04235da-615a-4098-9192-66537e3d2c8b-kube-api-access-9x4m8\") pod \"nova-cell0-1f01-account-create-update-4jqs4\" (UID: \"e04235da-615a-4098-9192-66537e3d2c8b\") " pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.444466 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzwjw\" (UniqueName: \"kubernetes.io/projected/a0631735-7992-4ca9-8564-2fb8c223a266-kube-api-access-qzwjw\") pod \"nova-api-733f-account-create-update-rmh9j\" (UID: \"a0631735-7992-4ca9-8564-2fb8c223a266\") " pod="openstack/nova-api-733f-account-create-update-rmh9j" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.444641 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0631735-7992-4ca9-8564-2fb8c223a266-operator-scripts\") pod \"nova-api-733f-account-create-update-rmh9j\" (UID: \"a0631735-7992-4ca9-8564-2fb8c223a266\") " pod="openstack/nova-api-733f-account-create-update-rmh9j" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.445368 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0631735-7992-4ca9-8564-2fb8c223a266-operator-scripts\") pod \"nova-api-733f-account-create-update-rmh9j\" (UID: \"a0631735-7992-4ca9-8564-2fb8c223a266\") " pod="openstack/nova-api-733f-account-create-update-rmh9j" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.473558 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzwjw\" (UniqueName: \"kubernetes.io/projected/a0631735-7992-4ca9-8564-2fb8c223a266-kube-api-access-qzwjw\") pod \"nova-api-733f-account-create-update-rmh9j\" (UID: \"a0631735-7992-4ca9-8564-2fb8c223a266\") " pod="openstack/nova-api-733f-account-create-update-rmh9j" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.482437 4861 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-733f-account-create-update-rmh9j" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.546671 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e04235da-615a-4098-9192-66537e3d2c8b-operator-scripts\") pod \"nova-cell0-1f01-account-create-update-4jqs4\" (UID: \"e04235da-615a-4098-9192-66537e3d2c8b\") " pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.546713 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-994cl\" (UniqueName: \"kubernetes.io/projected/1ed744f8-d389-4054-8bce-7a1f7b1be71f-kube-api-access-994cl\") pod \"nova-cell1-db-create-g6s94\" (UID: \"1ed744f8-d389-4054-8bce-7a1f7b1be71f\") " pod="openstack/nova-cell1-db-create-g6s94" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.546746 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ed744f8-d389-4054-8bce-7a1f7b1be71f-operator-scripts\") pod \"nova-cell1-db-create-g6s94\" (UID: \"1ed744f8-d389-4054-8bce-7a1f7b1be71f\") " pod="openstack/nova-cell1-db-create-g6s94" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.546787 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x4m8\" (UniqueName: \"kubernetes.io/projected/e04235da-615a-4098-9192-66537e3d2c8b-kube-api-access-9x4m8\") pod \"nova-cell0-1f01-account-create-update-4jqs4\" (UID: \"e04235da-615a-4098-9192-66537e3d2c8b\") " pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.547720 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e04235da-615a-4098-9192-66537e3d2c8b-operator-scripts\") pod \"nova-cell0-1f01-account-create-update-4jqs4\" (UID: \"e04235da-615a-4098-9192-66537e3d2c8b\") " pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.548394 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ed744f8-d389-4054-8bce-7a1f7b1be71f-operator-scripts\") pod \"nova-cell1-db-create-g6s94\" (UID: \"1ed744f8-d389-4054-8bce-7a1f7b1be71f\") " pod="openstack/nova-cell1-db-create-g6s94" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.553802 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-0391-account-create-update-9sb8t"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.555215 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-0391-account-create-update-9sb8t" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.558124 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.565697 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-0391-account-create-update-9sb8t"] Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.566586 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-994cl\" (UniqueName: \"kubernetes.io/projected/1ed744f8-d389-4054-8bce-7a1f7b1be71f-kube-api-access-994cl\") pod \"nova-cell1-db-create-g6s94\" (UID: \"1ed744f8-d389-4054-8bce-7a1f7b1be71f\") " pod="openstack/nova-cell1-db-create-g6s94" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.586906 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x4m8\" (UniqueName: \"kubernetes.io/projected/e04235da-615a-4098-9192-66537e3d2c8b-kube-api-access-9x4m8\") pod \"nova-cell0-1f01-account-create-update-4jqs4\" (UID: \"e04235da-615a-4098-9192-66537e3d2c8b\") " pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.621471 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-g6s94" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.648323 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2285a712-569a-411b-99b9-73d10212c822-operator-scripts\") pod \"nova-cell1-0391-account-create-update-9sb8t\" (UID: \"2285a712-569a-411b-99b9-73d10212c822\") " pod="openstack/nova-cell1-0391-account-create-update-9sb8t" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.648430 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwpl5\" (UniqueName: \"kubernetes.io/projected/2285a712-569a-411b-99b9-73d10212c822-kube-api-access-nwpl5\") pod \"nova-cell1-0391-account-create-update-9sb8t\" (UID: \"2285a712-569a-411b-99b9-73d10212c822\") " pod="openstack/nova-cell1-0391-account-create-update-9sb8t" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.724710 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.749966 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2285a712-569a-411b-99b9-73d10212c822-operator-scripts\") pod \"nova-cell1-0391-account-create-update-9sb8t\" (UID: \"2285a712-569a-411b-99b9-73d10212c822\") " pod="openstack/nova-cell1-0391-account-create-update-9sb8t" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.750032 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwpl5\" (UniqueName: \"kubernetes.io/projected/2285a712-569a-411b-99b9-73d10212c822-kube-api-access-nwpl5\") pod \"nova-cell1-0391-account-create-update-9sb8t\" (UID: \"2285a712-569a-411b-99b9-73d10212c822\") " pod="openstack/nova-cell1-0391-account-create-update-9sb8t" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.751247 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2285a712-569a-411b-99b9-73d10212c822-operator-scripts\") pod \"nova-cell1-0391-account-create-update-9sb8t\" (UID: \"2285a712-569a-411b-99b9-73d10212c822\") " pod="openstack/nova-cell1-0391-account-create-update-9sb8t" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.767426 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwpl5\" (UniqueName: \"kubernetes.io/projected/2285a712-569a-411b-99b9-73d10212c822-kube-api-access-nwpl5\") pod \"nova-cell1-0391-account-create-update-9sb8t\" (UID: \"2285a712-569a-411b-99b9-73d10212c822\") " pod="openstack/nova-cell1-0391-account-create-update-9sb8t" Jan 29 06:55:56 crc kubenswrapper[4861]: I0129 06:55:56.962262 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-0391-account-create-update-9sb8t" Jan 29 06:55:57 crc kubenswrapper[4861]: I0129 06:55:57.063429 4861 generic.go:334] "Generic (PLEG): container finished" podID="4bf22b0a-5a8f-4f18-b259-8c43fac759dd" containerID="a37bdb21623a422cbac5028522ac71470fcf73cee302efbb609b5d8966142036" exitCode=143 Jan 29 06:55:57 crc kubenswrapper[4861]: I0129 06:55:57.063470 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4bf22b0a-5a8f-4f18-b259-8c43fac759dd","Type":"ContainerDied","Data":"a37bdb21623a422cbac5028522ac71470fcf73cee302efbb609b5d8966142036"} Jan 29 06:55:58 crc kubenswrapper[4861]: I0129 06:55:58.077849 4861 generic.go:334] "Generic (PLEG): container finished" podID="ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" containerID="3ebe082356931eb76e7562873162d36ba549bea59f8d99bc8ac0a4cfd7a0f83a" exitCode=0 Jan 29 06:55:58 crc kubenswrapper[4861]: I0129 06:55:58.078196 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b","Type":"ContainerDied","Data":"3ebe082356931eb76e7562873162d36ba549bea59f8d99bc8ac0a4cfd7a0f83a"} Jan 29 06:55:58 crc kubenswrapper[4861]: I0129 06:55:58.582393 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 29 06:55:58 crc kubenswrapper[4861]: E0129 06:55:58.855085 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice/crio-ddf002d2b21cac7beb20b97fe76c1d0bda80dbc4238c13fb2244922405a64059\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice\": RecentStats: unable to find data in memory cache]" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.021009 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.110157 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" event={"ID":"1aa0d0b6-7731-421f-ac34-43cfd70e808c","Type":"ContainerStarted","Data":"81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499"} Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.111390 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.111422 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.115721 4861 generic.go:334] "Generic (PLEG): container finished" podID="4bf22b0a-5a8f-4f18-b259-8c43fac759dd" containerID="0b5e0b509d93b270a78efdf4453478a55b8536121a28cd51c081d4a2b3fbd06b" exitCode=0 Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.115781 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4bf22b0a-5a8f-4f18-b259-8c43fac759dd","Type":"ContainerDied","Data":"0b5e0b509d93b270a78efdf4453478a55b8536121a28cd51c081d4a2b3fbd06b"} Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.119339 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.119487 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e17948c-a091-4786-8319-f166892424e1","Type":"ContainerDied","Data":"f12ecf8447c7ddaf5baa3c8037e02a2dff2e29f40a143ed88387236171676808"} Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.119516 4861 scope.go:117] "RemoveContainer" containerID="b1181b835253eb664b853550944b522e7fcb43b8c1bd5a58de2f99221cc43a2c" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.126096 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" podUID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.128506 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"4c1315db-486b-4b63-bdb0-630c247d49b4","Type":"ContainerStarted","Data":"46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b"} Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.131704 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-sg-core-conf-yaml\") pod \"0e17948c-a091-4786-8319-f166892424e1\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.131808 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-log-httpd\") pod \"0e17948c-a091-4786-8319-f166892424e1\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.131828 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-scripts\") pod \"0e17948c-a091-4786-8319-f166892424e1\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.131848 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-combined-ca-bundle\") pod \"0e17948c-a091-4786-8319-f166892424e1\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.131938 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-config-data\") pod \"0e17948c-a091-4786-8319-f166892424e1\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.131987 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-run-httpd\") pod \"0e17948c-a091-4786-8319-f166892424e1\" (UID: \"0e17948c-a091-4786-8319-f166892424e1\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.132036 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nf2p\" (UniqueName: \"kubernetes.io/projected/0e17948c-a091-4786-8319-f166892424e1-kube-api-access-5nf2p\") pod \"0e17948c-a091-4786-8319-f166892424e1\" (UID: 
\"0e17948c-a091-4786-8319-f166892424e1\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.138509 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0e17948c-a091-4786-8319-f166892424e1" (UID: "0e17948c-a091-4786-8319-f166892424e1"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.144199 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-scripts" (OuterVolumeSpecName: "scripts") pod "0e17948c-a091-4786-8319-f166892424e1" (UID: "0e17948c-a091-4786-8319-f166892424e1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.148711 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0e17948c-a091-4786-8319-f166892424e1" (UID: "0e17948c-a091-4786-8319-f166892424e1"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.161865 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" podStartSLOduration=8.161847057 podStartE2EDuration="8.161847057s" podCreationTimestamp="2026-01-29 06:55:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:00.129530329 +0000 UTC m=+1251.801024896" watchObservedRunningTime="2026-01-29 06:56:00.161847057 +0000 UTC m=+1251.833341614" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.166168 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e17948c-a091-4786-8319-f166892424e1-kube-api-access-5nf2p" (OuterVolumeSpecName: "kube-api-access-5nf2p") pod "0e17948c-a091-4786-8319-f166892424e1" (UID: "0e17948c-a091-4786-8319-f166892424e1"). InnerVolumeSpecName "kube-api-access-5nf2p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.178875 4861 scope.go:117] "RemoveContainer" containerID="b839bbf835143eefafd14bb089237a16fb28f459121cfb5ea699317d4a8350a7" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.183510 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.14036531 podStartE2EDuration="14.183489249s" podCreationTimestamp="2026-01-29 06:55:46 +0000 UTC" firstStartedPulling="2026-01-29 06:55:47.561837293 +0000 UTC m=+1239.233331850" lastFinishedPulling="2026-01-29 06:55:59.604961232 +0000 UTC m=+1251.276455789" observedRunningTime="2026-01-29 06:56:00.156517269 +0000 UTC m=+1251.828011826" watchObservedRunningTime="2026-01-29 06:56:00.183489249 +0000 UTC m=+1251.854983826" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.220530 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0e17948c-a091-4786-8319-f166892424e1" (UID: "0e17948c-a091-4786-8319-f166892424e1"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.221654 4861 scope.go:117] "RemoveContainer" containerID="ec43666028ab1955761002ca6a31c9bfc6f661f016a4f35f55b6514337545ae2" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.234763 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.234795 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nf2p\" (UniqueName: \"kubernetes.io/projected/0e17948c-a091-4786-8319-f166892424e1-kube-api-access-5nf2p\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.234806 4861 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.234815 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e17948c-a091-4786-8319-f166892424e1-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.234823 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.258931 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e17948c-a091-4786-8319-f166892424e1" (UID: "0e17948c-a091-4786-8319-f166892424e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.283632 4861 scope.go:117] "RemoveContainer" containerID="0e895341b1ce6cf6d03f93c74e91d445498a819b791904138cf32b44a15d8b82" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.323301 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-config-data" (OuterVolumeSpecName: "config-data") pod "0e17948c-a091-4786-8319-f166892424e1" (UID: "0e17948c-a091-4786-8319-f166892424e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.331925 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.340195 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.340221 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e17948c-a091-4786-8319-f166892424e1-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.436345 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.442199 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xmsh\" (UniqueName: \"kubernetes.io/projected/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-kube-api-access-8xmsh\") pod \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.442253 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-internal-tls-certs\") pod \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.442304 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-logs\") pod \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.442370 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-config-data\") pod \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.442392 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-scripts\") pod \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.442422 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-httpd-run\") pod \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.442466 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.442539 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-combined-ca-bundle\") pod \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\" (UID: \"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.444732 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" (UID: "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.445661 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-logs" (OuterVolumeSpecName: "logs") pod "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" (UID: "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.462656 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-kube-api-access-8xmsh" (OuterVolumeSpecName: "kube-api-access-8xmsh") pod "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" (UID: "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b"). InnerVolumeSpecName "kube-api-access-8xmsh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.486456 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.510232 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-scripts" (OuterVolumeSpecName: "scripts") pod "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" (UID: "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.515619 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" (UID: "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.536654 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.539365 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" (UID: "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.543433 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:00 crc kubenswrapper[4861]: E0129 06:56:00.543812 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="ceilometer-notification-agent" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.543824 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="ceilometer-notification-agent" Jan 29 06:56:00 crc kubenswrapper[4861]: E0129 06:56:00.543832 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="proxy-httpd" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.543838 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="proxy-httpd" Jan 29 06:56:00 crc kubenswrapper[4861]: E0129 06:56:00.543875 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="sg-core" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.543883 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="sg-core" Jan 29 06:56:00 crc kubenswrapper[4861]: E0129 06:56:00.543897 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" containerName="glance-log" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.543903 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" containerName="glance-log" Jan 29 06:56:00 crc kubenswrapper[4861]: E0129 06:56:00.543928 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="ceilometer-central-agent" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.543934 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="ceilometer-central-agent" Jan 29 06:56:00 crc kubenswrapper[4861]: E0129 06:56:00.543944 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bf22b0a-5a8f-4f18-b259-8c43fac759dd" containerName="glance-httpd" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.543950 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bf22b0a-5a8f-4f18-b259-8c43fac759dd" containerName="glance-httpd" Jan 29 06:56:00 crc kubenswrapper[4861]: E0129 06:56:00.543957 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" containerName="glance-httpd" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.543963 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" containerName="glance-httpd" Jan 29 06:56:00 crc kubenswrapper[4861]: E0129 06:56:00.543978 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bf22b0a-5a8f-4f18-b259-8c43fac759dd" containerName="glance-log" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.543983 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bf22b0a-5a8f-4f18-b259-8c43fac759dd" containerName="glance-log" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544166 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bf22b0a-5a8f-4f18-b259-8c43fac759dd" 
containerName="glance-httpd" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544177 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" containerName="glance-log" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544187 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" containerName="glance-httpd" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544198 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="ceilometer-notification-agent" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544206 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="proxy-httpd" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544215 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bf22b0a-5a8f-4f18-b259-8c43fac759dd" containerName="glance-log" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544229 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="sg-core" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544238 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e17948c-a091-4786-8319-f166892424e1" containerName="ceilometer-central-agent" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544536 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-httpd-run\") pod \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544620 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-public-tls-certs\") pod \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544651 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-combined-ca-bundle\") pod \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544678 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-scripts\") pod \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544803 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-config-data\") pod \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544834 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-logs\") pod \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " Jan 29 06:56:00 crc 
kubenswrapper[4861]: I0129 06:56:00.544866 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.544900 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mbl7\" (UniqueName: \"kubernetes.io/projected/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-kube-api-access-6mbl7\") pod \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\" (UID: \"4bf22b0a-5a8f-4f18-b259-8c43fac759dd\") " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.545328 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.545347 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xmsh\" (UniqueName: \"kubernetes.io/projected/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-kube-api-access-8xmsh\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.545360 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.545370 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.545382 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.545399 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.548216 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.549752 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4bf22b0a-5a8f-4f18-b259-8c43fac759dd" (UID: "4bf22b0a-5a8f-4f18-b259-8c43fac759dd"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.549957 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-logs" (OuterVolumeSpecName: "logs") pod "4bf22b0a-5a8f-4f18-b259-8c43fac759dd" (UID: "4bf22b0a-5a8f-4f18-b259-8c43fac759dd"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.551091 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-scripts" (OuterVolumeSpecName: "scripts") pod "4bf22b0a-5a8f-4f18-b259-8c43fac759dd" (UID: "4bf22b0a-5a8f-4f18-b259-8c43fac759dd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.556660 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.556902 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.572648 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-kube-api-access-6mbl7" (OuterVolumeSpecName: "kube-api-access-6mbl7") pod "4bf22b0a-5a8f-4f18-b259-8c43fac759dd" (UID: "4bf22b0a-5a8f-4f18-b259-8c43fac759dd"). InnerVolumeSpecName "kube-api-access-6mbl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.592217 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.598046 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.599259 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "4bf22b0a-5a8f-4f18-b259-8c43fac759dd" (UID: "4bf22b0a-5a8f-4f18-b259-8c43fac759dd"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.636963 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4bf22b0a-5a8f-4f18-b259-8c43fac759dd" (UID: "4bf22b0a-5a8f-4f18-b259-8c43fac759dd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.637846 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" (UID: "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.642723 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-config-data" (OuterVolumeSpecName: "config-data") pod "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" (UID: "ad57ea59-d551-4e68-aaf6-7b2ee6ea739b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647123 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5nmb\" (UniqueName: \"kubernetes.io/projected/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-kube-api-access-f5nmb\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647178 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-scripts\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647232 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-run-httpd\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647260 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647291 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-log-httpd\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647406 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-config-data\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647461 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647518 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mbl7\" (UniqueName: \"kubernetes.io/projected/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-kube-api-access-6mbl7\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647532 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647542 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647550 4861 
reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647558 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647567 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647576 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647584 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.647602 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.681791 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-g6s94"] Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.706319 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-config-data" (OuterVolumeSpecName: "config-data") pod "4bf22b0a-5a8f-4f18-b259-8c43fac759dd" (UID: "4bf22b0a-5a8f-4f18-b259-8c43fac759dd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.710861 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-733f-account-create-update-rmh9j"] Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.728394 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.738383 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-9wjtc"] Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.746423 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-9g56k"] Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.748481 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.748517 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5nmb\" (UniqueName: \"kubernetes.io/projected/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-kube-api-access-f5nmb\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.748542 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-scripts\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.748578 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-run-httpd\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.748596 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.748617 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-log-httpd\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.748687 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-config-data\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.748737 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 
06:56:00.748749 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.752982 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-config-data\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.753023 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-run-httpd\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.757434 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-log-httpd\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.757763 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-scripts\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.758297 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.760173 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1f01-account-create-update-4jqs4"] Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.777340 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4bf22b0a-5a8f-4f18-b259-8c43fac759dd" (UID: "4bf22b0a-5a8f-4f18-b259-8c43fac759dd"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.780028 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5nmb\" (UniqueName: \"kubernetes.io/projected/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-kube-api-access-f5nmb\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.788912 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " pod="openstack/ceilometer-0" Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.791671 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-0391-account-create-update-9sb8t"] Jan 29 06:56:00 crc kubenswrapper[4861]: I0129 06:56:00.851116 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4bf22b0a-5a8f-4f18-b259-8c43fac759dd-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.043990 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.138441 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e17948c-a091-4786-8319-f166892424e1" path="/var/lib/kubelet/pods/0e17948c-a091-4786-8319-f166892424e1/volumes" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.159964 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0391-account-create-update-9sb8t" event={"ID":"2285a712-569a-411b-99b9-73d10212c822","Type":"ContainerStarted","Data":"3d8913b8fd01c868a921f30a661f35f5ded4fd82c3624ffc847c70fc2c0cb4f9"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.161140 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" event={"ID":"e04235da-615a-4098-9192-66537e3d2c8b","Type":"ContainerStarted","Data":"1e634fdcf03c8f53282b290c793d6b040cffed4dbe42c8934c1ba276df36ae3b"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.161184 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" event={"ID":"e04235da-615a-4098-9192-66537e3d2c8b","Type":"ContainerStarted","Data":"6ba00be9ab23ee487cd1a8ae36ff37c4a103248f03a4bbabfc2bdfb36e3898df"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.177352 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.179088 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4bf22b0a-5a8f-4f18-b259-8c43fac759dd","Type":"ContainerDied","Data":"a935ebdeb201738e741fbc76e1cdcf8de98bedc486fd59184ce464857e702ba8"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.179159 4861 scope.go:117] "RemoveContainer" containerID="0b5e0b509d93b270a78efdf4453478a55b8536121a28cd51c081d4a2b3fbd06b" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.193270 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" podStartSLOduration=5.19325461 podStartE2EDuration="5.19325461s" podCreationTimestamp="2026-01-29 06:55:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:01.176574987 +0000 UTC m=+1252.848069544" watchObservedRunningTime="2026-01-29 06:56:01.19325461 +0000 UTC m=+1252.864749167" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.208213 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-g6s94" event={"ID":"1ed744f8-d389-4054-8bce-7a1f7b1be71f","Type":"ContainerStarted","Data":"201cdbdb2df08c9c35f6540911640e71c10ebe7ff1718cb7caf09db3f61c1c95"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.208251 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-g6s94" event={"ID":"1ed744f8-d389-4054-8bce-7a1f7b1be71f","Type":"ContainerStarted","Data":"49df61298e6f5d6de608af3acde014d8834a27989ff447fbc44b40464ac053b3"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.225879 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-9g56k" event={"ID":"9d56467d-6815-4f11-9b52-06cd32a818ab","Type":"ContainerStarted","Data":"fe7450ba0e402da5c2f5d7e5d0760f7d1e0f5894d9b62a87940fe5d055af49f2"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.225917 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-9g56k" event={"ID":"9d56467d-6815-4f11-9b52-06cd32a818ab","Type":"ContainerStarted","Data":"56d80052f7a8d39c779566379a4a91ae7c1fb04431c7a2c4a3119243f669f2b4"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.235041 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.267171 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.268856 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad57ea59-d551-4e68-aaf6-7b2ee6ea739b","Type":"ContainerDied","Data":"1a2eeb3b1bac6549a3e7dbd577241a19ccebaf204542e434abdeab2804fa70fc"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.268956 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.291456 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-9g56k" podStartSLOduration=6.291434978 podStartE2EDuration="6.291434978s" podCreationTimestamp="2026-01-29 06:55:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:01.273345469 +0000 UTC m=+1252.944840026" watchObservedRunningTime="2026-01-29 06:56:01.291434978 +0000 UTC m=+1252.962929535" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.292015 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.300952 4861 scope.go:117] "RemoveContainer" containerID="a37bdb21623a422cbac5028522ac71470fcf73cee302efbb609b5d8966142036" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.302674 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.310698 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-733f-account-create-update-rmh9j" event={"ID":"a0631735-7992-4ca9-8564-2fb8c223a266","Type":"ContainerStarted","Data":"a5f64eec477c12ec38ba2fac8b31e70090fd279bc5e09a3440fc3328dc16aa74"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.310739 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-733f-account-create-update-rmh9j" event={"ID":"a0631735-7992-4ca9-8564-2fb8c223a266","Type":"ContainerStarted","Data":"1daa25b5244fa9071ae1e1d6e767862a5f7faedcaee17562301201e645688a05"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.312115 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.312194 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.312465 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-nnz82" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.312630 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.330808 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9wjtc" event={"ID":"25104968-4e44-41eb-be19-a88e17384e57","Type":"ContainerStarted","Data":"9f6be3de9ba208665f85ce4d4db605cb993a64e30c2100a8a6eccfa07d1e0a8f"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.330837 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9wjtc" event={"ID":"25104968-4e44-41eb-be19-a88e17384e57","Type":"ContainerStarted","Data":"4c3700990eb921a6a5d7b24d24e731f55311d033e84dd8c65425adc035e89252"} Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.362324 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.363611 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-scripts\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.363638 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.363707 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-logs\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.363741 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.363758 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bvfs\" (UniqueName: \"kubernetes.io/projected/10b22efc-707a-4ffc-8edc-44c39900ba2b-kube-api-access-2bvfs\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.363798 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.363815 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.363900 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-config-data\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.379002 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-g6s94" podStartSLOduration=5.37897978 podStartE2EDuration="5.37897978s" podCreationTimestamp="2026-01-29 06:55:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:01.300479633 +0000 UTC m=+1252.971974190" watchObservedRunningTime="2026-01-29 
06:56:01.37897978 +0000 UTC m=+1253.050474337" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.407482 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.427618 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.446889 4861 scope.go:117] "RemoveContainer" containerID="3ebe082356931eb76e7562873162d36ba549bea59f8d99bc8ac0a4cfd7a0f83a" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.453138 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.455009 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.463129 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.463354 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.465340 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-scripts\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.465381 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.465423 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-logs\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.465475 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.465495 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bvfs\" (UniqueName: \"kubernetes.io/projected/10b22efc-707a-4ffc-8edc-44c39900ba2b-kube-api-access-2bvfs\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.465558 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" 
Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.465577 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.465700 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-config-data\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.465853 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.466456 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-logs\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.467126 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.480504 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.485205 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-config-data\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.486583 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-scripts\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.498494 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bvfs\" (UniqueName: \"kubernetes.io/projected/10b22efc-707a-4ffc-8edc-44c39900ba2b-kube-api-access-2bvfs\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.512853 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.517310 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-733f-account-create-update-rmh9j" podStartSLOduration=5.517293611 podStartE2EDuration="5.517293611s" podCreationTimestamp="2026-01-29 06:55:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:01.386461645 +0000 UTC m=+1253.057956202" watchObservedRunningTime="2026-01-29 06:56:01.517293611 +0000 UTC m=+1253.188788168" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.542591 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.554765 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.584487 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.602754 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-9wjtc" podStartSLOduration=5.602737988 podStartE2EDuration="5.602737988s" podCreationTimestamp="2026-01-29 06:55:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:01.472046346 +0000 UTC m=+1253.143540903" watchObservedRunningTime="2026-01-29 06:56:01.602737988 +0000 UTC m=+1253.274232545" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.637149 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.674934 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.675504 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.675618 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nphx5\" (UniqueName: \"kubernetes.io/projected/5ce76094-c71f-46c7-a69d-7d30d8540c5a-kube-api-access-nphx5\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 
06:56:01.675706 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.675773 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.675818 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.675851 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.675883 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-logs\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.708475 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.777131 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.777178 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-logs\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.777234 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.777289 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.777354 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nphx5\" (UniqueName: \"kubernetes.io/projected/5ce76094-c71f-46c7-a69d-7d30d8540c5a-kube-api-access-nphx5\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.777387 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.777414 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.777433 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.777817 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-logs\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 
06:56:01.778444 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.778486 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.782344 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.782640 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.782738 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.786688 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.798301 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nphx5\" (UniqueName: \"kubernetes.io/projected/5ce76094-c71f-46c7-a69d-7d30d8540c5a-kube-api-access-nphx5\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.847826 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " pod="openstack/glance-default-internal-api-0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.961232 4861 scope.go:117] "RemoveContainer" containerID="830b8ad78a9cb455d1b976b843a3c03005c610d03cc564242cb5c13c4c9476d0" Jan 29 06:56:01 crc kubenswrapper[4861]: I0129 06:56:01.961933 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.267391 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:56:02 crc kubenswrapper[4861]: W0129 06:56:02.273379 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10b22efc_707a_4ffc_8edc_44c39900ba2b.slice/crio-7d02af97aadcaccb8482a8772034ffa20d2d4129ccc5be13e4f2477ade5e3cec WatchSource:0}: Error finding container 7d02af97aadcaccb8482a8772034ffa20d2d4129ccc5be13e4f2477ade5e3cec: Status 404 returned error can't find the container with id 7d02af97aadcaccb8482a8772034ffa20d2d4129ccc5be13e4f2477ade5e3cec Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.356496 4861 generic.go:334] "Generic (PLEG): container finished" podID="e04235da-615a-4098-9192-66537e3d2c8b" containerID="1e634fdcf03c8f53282b290c793d6b040cffed4dbe42c8934c1ba276df36ae3b" exitCode=0 Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.356732 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" event={"ID":"e04235da-615a-4098-9192-66537e3d2c8b","Type":"ContainerDied","Data":"1e634fdcf03c8f53282b290c793d6b040cffed4dbe42c8934c1ba276df36ae3b"} Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.359106 4861 generic.go:334] "Generic (PLEG): container finished" podID="25104968-4e44-41eb-be19-a88e17384e57" containerID="9f6be3de9ba208665f85ce4d4db605cb993a64e30c2100a8a6eccfa07d1e0a8f" exitCode=0 Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.359146 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9wjtc" event={"ID":"25104968-4e44-41eb-be19-a88e17384e57","Type":"ContainerDied","Data":"9f6be3de9ba208665f85ce4d4db605cb993a64e30c2100a8a6eccfa07d1e0a8f"} Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.360739 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10b22efc-707a-4ffc-8edc-44c39900ba2b","Type":"ContainerStarted","Data":"7d02af97aadcaccb8482a8772034ffa20d2d4129ccc5be13e4f2477ade5e3cec"} Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.364791 4861 generic.go:334] "Generic (PLEG): container finished" podID="9d56467d-6815-4f11-9b52-06cd32a818ab" containerID="fe7450ba0e402da5c2f5d7e5d0760f7d1e0f5894d9b62a87940fe5d055af49f2" exitCode=0 Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.364834 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-9g56k" event={"ID":"9d56467d-6815-4f11-9b52-06cd32a818ab","Type":"ContainerDied","Data":"fe7450ba0e402da5c2f5d7e5d0760f7d1e0f5894d9b62a87940fe5d055af49f2"} Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.373357 4861 generic.go:334] "Generic (PLEG): container finished" podID="a0631735-7992-4ca9-8564-2fb8c223a266" containerID="a5f64eec477c12ec38ba2fac8b31e70090fd279bc5e09a3440fc3328dc16aa74" exitCode=0 Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.373420 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-733f-account-create-update-rmh9j" event={"ID":"a0631735-7992-4ca9-8564-2fb8c223a266","Type":"ContainerDied","Data":"a5f64eec477c12ec38ba2fac8b31e70090fd279bc5e09a3440fc3328dc16aa74"} Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.382752 4861 generic.go:334] "Generic (PLEG): container finished" podID="2285a712-569a-411b-99b9-73d10212c822" 
containerID="7508e3d4514c3d722dddc4711161afe75d06331d15db9417baef8bca8f91efbe" exitCode=0 Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.382811 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0391-account-create-update-9sb8t" event={"ID":"2285a712-569a-411b-99b9-73d10212c822","Type":"ContainerDied","Data":"7508e3d4514c3d722dddc4711161afe75d06331d15db9417baef8bca8f91efbe"} Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.390582 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c","Type":"ContainerStarted","Data":"c8d975e98f20c0efdbc66f93c817ebd7ca8b14ab6cbb58e519d487e8ca7e33cf"} Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.390641 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c","Type":"ContainerStarted","Data":"914e583bbdfc9c0edadd7c2275a5f73e2e95ee09d4cdf5bb4de365d36aed8588"} Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.392658 4861 generic.go:334] "Generic (PLEG): container finished" podID="1ed744f8-d389-4054-8bce-7a1f7b1be71f" containerID="201cdbdb2df08c9c35f6540911640e71c10ebe7ff1718cb7caf09db3f61c1c95" exitCode=0 Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.392919 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-g6s94" event={"ID":"1ed744f8-d389-4054-8bce-7a1f7b1be71f","Type":"ContainerDied","Data":"201cdbdb2df08c9c35f6540911640e71c10ebe7ff1718cb7caf09db3f61c1c95"} Jan 29 06:56:02 crc kubenswrapper[4861]: I0129 06:56:02.541663 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.126343 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bf22b0a-5a8f-4f18-b259-8c43fac759dd" path="/var/lib/kubelet/pods/4bf22b0a-5a8f-4f18-b259-8c43fac759dd/volumes" Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.127452 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad57ea59-d551-4e68-aaf6-7b2ee6ea739b" path="/var/lib/kubelet/pods/ad57ea59-d551-4e68-aaf6-7b2ee6ea739b/volumes" Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.146383 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.146568 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="f091de47-47eb-4d84-92b8-c478bc6a7af7" containerName="kube-state-metrics" containerID="cri-o://314634ced71a87f16173e1b9c68237474cd93111184df069ec48d9a61d3014b3" gracePeriod=30 Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.415379 4861 generic.go:334] "Generic (PLEG): container finished" podID="f091de47-47eb-4d84-92b8-c478bc6a7af7" containerID="314634ced71a87f16173e1b9c68237474cd93111184df069ec48d9a61d3014b3" exitCode=2 Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.415675 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f091de47-47eb-4d84-92b8-c478bc6a7af7","Type":"ContainerDied","Data":"314634ced71a87f16173e1b9c68237474cd93111184df069ec48d9a61d3014b3"} Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.447787 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"5ce76094-c71f-46c7-a69d-7d30d8540c5a","Type":"ContainerStarted","Data":"1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483"} Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.447860 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5ce76094-c71f-46c7-a69d-7d30d8540c5a","Type":"ContainerStarted","Data":"24643f26c66512e243106d2edf41b11b2f702622b324f89a9e50b3c2f610202b"} Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.451566 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c","Type":"ContainerStarted","Data":"4a388b859bae16f5d0166d76130a09463b7c85548aba200b58189d8b5ea112c8"} Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.453315 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10b22efc-707a-4ffc-8edc-44c39900ba2b","Type":"ContainerStarted","Data":"f4608109881c8d879d0747a15002faa7be33fa42a0ab54b3b737788b5adb25d7"} Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.705245 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.826193 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4p8q\" (UniqueName: \"kubernetes.io/projected/f091de47-47eb-4d84-92b8-c478bc6a7af7-kube-api-access-c4p8q\") pod \"f091de47-47eb-4d84-92b8-c478bc6a7af7\" (UID: \"f091de47-47eb-4d84-92b8-c478bc6a7af7\") " Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.831804 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f091de47-47eb-4d84-92b8-c478bc6a7af7-kube-api-access-c4p8q" (OuterVolumeSpecName: "kube-api-access-c4p8q") pod "f091de47-47eb-4d84-92b8-c478bc6a7af7" (UID: "f091de47-47eb-4d84-92b8-c478bc6a7af7"). InnerVolumeSpecName "kube-api-access-c4p8q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.928409 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4p8q\" (UniqueName: \"kubernetes.io/projected/f091de47-47eb-4d84-92b8-c478bc6a7af7-kube-api-access-c4p8q\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:03 crc kubenswrapper[4861]: I0129 06:56:03.978960 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-9wjtc" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.142649 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25104968-4e44-41eb-be19-a88e17384e57-operator-scripts\") pod \"25104968-4e44-41eb-be19-a88e17384e57\" (UID: \"25104968-4e44-41eb-be19-a88e17384e57\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.142777 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jf79s\" (UniqueName: \"kubernetes.io/projected/25104968-4e44-41eb-be19-a88e17384e57-kube-api-access-jf79s\") pod \"25104968-4e44-41eb-be19-a88e17384e57\" (UID: \"25104968-4e44-41eb-be19-a88e17384e57\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.144240 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25104968-4e44-41eb-be19-a88e17384e57-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "25104968-4e44-41eb-be19-a88e17384e57" (UID: "25104968-4e44-41eb-be19-a88e17384e57"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.155306 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25104968-4e44-41eb-be19-a88e17384e57-kube-api-access-jf79s" (OuterVolumeSpecName: "kube-api-access-jf79s") pod "25104968-4e44-41eb-be19-a88e17384e57" (UID: "25104968-4e44-41eb-be19-a88e17384e57"). InnerVolumeSpecName "kube-api-access-jf79s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.223318 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-733f-account-create-update-rmh9j" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.252179 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25104968-4e44-41eb-be19-a88e17384e57-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.252433 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jf79s\" (UniqueName: \"kubernetes.io/projected/25104968-4e44-41eb-be19-a88e17384e57-kube-api-access-jf79s\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.254623 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-g6s94" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.295304 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.310488 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-0391-account-create-update-9sb8t" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.357994 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ed744f8-d389-4054-8bce-7a1f7b1be71f-operator-scripts\") pod \"1ed744f8-d389-4054-8bce-7a1f7b1be71f\" (UID: \"1ed744f8-d389-4054-8bce-7a1f7b1be71f\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.358090 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzwjw\" (UniqueName: \"kubernetes.io/projected/a0631735-7992-4ca9-8564-2fb8c223a266-kube-api-access-qzwjw\") pod \"a0631735-7992-4ca9-8564-2fb8c223a266\" (UID: \"a0631735-7992-4ca9-8564-2fb8c223a266\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.358931 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ed744f8-d389-4054-8bce-7a1f7b1be71f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1ed744f8-d389-4054-8bce-7a1f7b1be71f" (UID: "1ed744f8-d389-4054-8bce-7a1f7b1be71f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.359824 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0631735-7992-4ca9-8564-2fb8c223a266-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a0631735-7992-4ca9-8564-2fb8c223a266" (UID: "a0631735-7992-4ca9-8564-2fb8c223a266"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.359123 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0631735-7992-4ca9-8564-2fb8c223a266-operator-scripts\") pod \"a0631735-7992-4ca9-8564-2fb8c223a266\" (UID: \"a0631735-7992-4ca9-8564-2fb8c223a266\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.360283 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-994cl\" (UniqueName: \"kubernetes.io/projected/1ed744f8-d389-4054-8bce-7a1f7b1be71f-kube-api-access-994cl\") pod \"1ed744f8-d389-4054-8bce-7a1f7b1be71f\" (UID: \"1ed744f8-d389-4054-8bce-7a1f7b1be71f\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.380605 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ed744f8-d389-4054-8bce-7a1f7b1be71f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.380662 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0631735-7992-4ca9-8564-2fb8c223a266-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.380762 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ed744f8-d389-4054-8bce-7a1f7b1be71f-kube-api-access-994cl" (OuterVolumeSpecName: "kube-api-access-994cl") pod "1ed744f8-d389-4054-8bce-7a1f7b1be71f" (UID: "1ed744f8-d389-4054-8bce-7a1f7b1be71f"). InnerVolumeSpecName "kube-api-access-994cl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.380611 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0631735-7992-4ca9-8564-2fb8c223a266-kube-api-access-qzwjw" (OuterVolumeSpecName: "kube-api-access-qzwjw") pod "a0631735-7992-4ca9-8564-2fb8c223a266" (UID: "a0631735-7992-4ca9-8564-2fb8c223a266"). InnerVolumeSpecName "kube-api-access-qzwjw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.464337 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-733f-account-create-update-rmh9j" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.464978 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-733f-account-create-update-rmh9j" event={"ID":"a0631735-7992-4ca9-8564-2fb8c223a266","Type":"ContainerDied","Data":"1daa25b5244fa9071ae1e1d6e767862a5f7faedcaee17562301201e645688a05"} Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.466212 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1daa25b5244fa9071ae1e1d6e767862a5f7faedcaee17562301201e645688a05" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.467220 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f091de47-47eb-4d84-92b8-c478bc6a7af7","Type":"ContainerDied","Data":"315ec98084a2af61029598f51483b7de7c636f2d7f53e43acb214e1fbf8573e3"} Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.467264 4861 scope.go:117] "RemoveContainer" containerID="314634ced71a87f16173e1b9c68237474cd93111184df069ec48d9a61d3014b3" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.467465 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.477710 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" event={"ID":"e04235da-615a-4098-9192-66537e3d2c8b","Type":"ContainerDied","Data":"6ba00be9ab23ee487cd1a8ae36ff37c4a103248f03a4bbabfc2bdfb36e3898df"} Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.477735 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1f01-account-create-update-4jqs4" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.477749 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ba00be9ab23ee487cd1a8ae36ff37c4a103248f03a4bbabfc2bdfb36e3898df" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.482320 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwpl5\" (UniqueName: \"kubernetes.io/projected/2285a712-569a-411b-99b9-73d10212c822-kube-api-access-nwpl5\") pod \"2285a712-569a-411b-99b9-73d10212c822\" (UID: \"2285a712-569a-411b-99b9-73d10212c822\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.482402 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9x4m8\" (UniqueName: \"kubernetes.io/projected/e04235da-615a-4098-9192-66537e3d2c8b-kube-api-access-9x4m8\") pod \"e04235da-615a-4098-9192-66537e3d2c8b\" (UID: \"e04235da-615a-4098-9192-66537e3d2c8b\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.482427 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2285a712-569a-411b-99b9-73d10212c822-operator-scripts\") pod \"2285a712-569a-411b-99b9-73d10212c822\" (UID: \"2285a712-569a-411b-99b9-73d10212c822\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.482452 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e04235da-615a-4098-9192-66537e3d2c8b-operator-scripts\") pod \"e04235da-615a-4098-9192-66537e3d2c8b\" (UID: \"e04235da-615a-4098-9192-66537e3d2c8b\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.482710 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-994cl\" (UniqueName: \"kubernetes.io/projected/1ed744f8-d389-4054-8bce-7a1f7b1be71f-kube-api-access-994cl\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.482726 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzwjw\" (UniqueName: \"kubernetes.io/projected/a0631735-7992-4ca9-8564-2fb8c223a266-kube-api-access-qzwjw\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.483885 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e04235da-615a-4098-9192-66537e3d2c8b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e04235da-615a-4098-9192-66537e3d2c8b" (UID: "e04235da-615a-4098-9192-66537e3d2c8b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.484265 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10b22efc-707a-4ffc-8edc-44c39900ba2b","Type":"ContainerStarted","Data":"9dd5b64d9ec144641a738b5aa4db658de3394d4fbf3ece8178a1881822e737bf"} Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.484281 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2285a712-569a-411b-99b9-73d10212c822-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2285a712-569a-411b-99b9-73d10212c822" (UID: "2285a712-569a-411b-99b9-73d10212c822"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.484616 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-9g56k" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.496321 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e04235da-615a-4098-9192-66537e3d2c8b-kube-api-access-9x4m8" (OuterVolumeSpecName: "kube-api-access-9x4m8") pod "e04235da-615a-4098-9192-66537e3d2c8b" (UID: "e04235da-615a-4098-9192-66537e3d2c8b"). InnerVolumeSpecName "kube-api-access-9x4m8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.499831 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2285a712-569a-411b-99b9-73d10212c822-kube-api-access-nwpl5" (OuterVolumeSpecName: "kube-api-access-nwpl5") pod "2285a712-569a-411b-99b9-73d10212c822" (UID: "2285a712-569a-411b-99b9-73d10212c822"). InnerVolumeSpecName "kube-api-access-nwpl5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.509741 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-9g56k" event={"ID":"9d56467d-6815-4f11-9b52-06cd32a818ab","Type":"ContainerDied","Data":"56d80052f7a8d39c779566379a4a91ae7c1fb04431c7a2c4a3119243f669f2b4"} Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.509937 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="56d80052f7a8d39c779566379a4a91ae7c1fb04431c7a2c4a3119243f669f2b4" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.523448 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-g6s94" event={"ID":"1ed744f8-d389-4054-8bce-7a1f7b1be71f","Type":"ContainerDied","Data":"49df61298e6f5d6de608af3acde014d8834a27989ff447fbc44b40464ac053b3"} Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.523579 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49df61298e6f5d6de608af3acde014d8834a27989ff447fbc44b40464ac053b3" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.523725 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-g6s94" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.531367 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0391-account-create-update-9sb8t" event={"ID":"2285a712-569a-411b-99b9-73d10212c822","Type":"ContainerDied","Data":"3d8913b8fd01c868a921f30a661f35f5ded4fd82c3624ffc847c70fc2c0cb4f9"} Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.531412 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d8913b8fd01c868a921f30a661f35f5ded4fd82c3624ffc847c70fc2c0cb4f9" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.531435 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-0391-account-create-update-9sb8t" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.556516 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9wjtc" event={"ID":"25104968-4e44-41eb-be19-a88e17384e57","Type":"ContainerDied","Data":"4c3700990eb921a6a5d7b24d24e731f55311d033e84dd8c65425adc035e89252"} Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.556560 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c3700990eb921a6a5d7b24d24e731f55311d033e84dd8c65425adc035e89252" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.556642 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-9wjtc" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.563702 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.563683365 podStartE2EDuration="3.563683365s" podCreationTimestamp="2026-01-29 06:56:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:04.537750011 +0000 UTC m=+1256.209244568" watchObservedRunningTime="2026-01-29 06:56:04.563683365 +0000 UTC m=+1256.235177922" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.566904 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5ce76094-c71f-46c7-a69d-7d30d8540c5a","Type":"ContainerStarted","Data":"c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235"} Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.578290 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c","Type":"ContainerStarted","Data":"84abed4c06273d14a3e3619785d499f0dd56ff4fa89cada94e808003bbe05ceb"} Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.585380 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.586526 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwpl5\" (UniqueName: \"kubernetes.io/projected/2285a712-569a-411b-99b9-73d10212c822-kube-api-access-nwpl5\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.586549 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9x4m8\" (UniqueName: \"kubernetes.io/projected/e04235da-615a-4098-9192-66537e3d2c8b-kube-api-access-9x4m8\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.586561 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2285a712-569a-411b-99b9-73d10212c822-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.586571 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e04235da-615a-4098-9192-66537e3d2c8b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.595927 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619109 4861 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/kube-state-metrics-0"] Jan 29 06:56:04 crc kubenswrapper[4861]: E0129 06:56:04.619479 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2285a712-569a-411b-99b9-73d10212c822" containerName="mariadb-account-create-update" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619494 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="2285a712-569a-411b-99b9-73d10212c822" containerName="mariadb-account-create-update" Jan 29 06:56:04 crc kubenswrapper[4861]: E0129 06:56:04.619511 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25104968-4e44-41eb-be19-a88e17384e57" containerName="mariadb-database-create" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619517 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="25104968-4e44-41eb-be19-a88e17384e57" containerName="mariadb-database-create" Jan 29 06:56:04 crc kubenswrapper[4861]: E0129 06:56:04.619528 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0631735-7992-4ca9-8564-2fb8c223a266" containerName="mariadb-account-create-update" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619534 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0631735-7992-4ca9-8564-2fb8c223a266" containerName="mariadb-account-create-update" Jan 29 06:56:04 crc kubenswrapper[4861]: E0129 06:56:04.619542 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ed744f8-d389-4054-8bce-7a1f7b1be71f" containerName="mariadb-database-create" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619549 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ed744f8-d389-4054-8bce-7a1f7b1be71f" containerName="mariadb-database-create" Jan 29 06:56:04 crc kubenswrapper[4861]: E0129 06:56:04.619566 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e04235da-615a-4098-9192-66537e3d2c8b" containerName="mariadb-account-create-update" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619571 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e04235da-615a-4098-9192-66537e3d2c8b" containerName="mariadb-account-create-update" Jan 29 06:56:04 crc kubenswrapper[4861]: E0129 06:56:04.619585 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d56467d-6815-4f11-9b52-06cd32a818ab" containerName="mariadb-database-create" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619591 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d56467d-6815-4f11-9b52-06cd32a818ab" containerName="mariadb-database-create" Jan 29 06:56:04 crc kubenswrapper[4861]: E0129 06:56:04.619601 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f091de47-47eb-4d84-92b8-c478bc6a7af7" containerName="kube-state-metrics" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619606 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f091de47-47eb-4d84-92b8-c478bc6a7af7" containerName="kube-state-metrics" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619749 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="25104968-4e44-41eb-be19-a88e17384e57" containerName="mariadb-database-create" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619762 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0631735-7992-4ca9-8564-2fb8c223a266" containerName="mariadb-account-create-update" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619773 4861 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2285a712-569a-411b-99b9-73d10212c822" containerName="mariadb-account-create-update" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619789 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e04235da-615a-4098-9192-66537e3d2c8b" containerName="mariadb-account-create-update" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619799 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d56467d-6815-4f11-9b52-06cd32a818ab" containerName="mariadb-database-create" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619804 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f091de47-47eb-4d84-92b8-c478bc6a7af7" containerName="kube-state-metrics" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619816 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ed744f8-d389-4054-8bce-7a1f7b1be71f" containerName="mariadb-database-create" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.619834 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.619814792 podStartE2EDuration="3.619814792s" podCreationTimestamp="2026-01-29 06:56:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:04.60355406 +0000 UTC m=+1256.275048617" watchObservedRunningTime="2026-01-29 06:56:04.619814792 +0000 UTC m=+1256.291309349" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.620410 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.624312 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.624461 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.638327 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.687467 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d56467d-6815-4f11-9b52-06cd32a818ab-operator-scripts\") pod \"9d56467d-6815-4f11-9b52-06cd32a818ab\" (UID: \"9d56467d-6815-4f11-9b52-06cd32a818ab\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.687516 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drvqt\" (UniqueName: \"kubernetes.io/projected/9d56467d-6815-4f11-9b52-06cd32a818ab-kube-api-access-drvqt\") pod \"9d56467d-6815-4f11-9b52-06cd32a818ab\" (UID: \"9d56467d-6815-4f11-9b52-06cd32a818ab\") " Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.688008 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d56467d-6815-4f11-9b52-06cd32a818ab-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9d56467d-6815-4f11-9b52-06cd32a818ab" (UID: "9d56467d-6815-4f11-9b52-06cd32a818ab"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.693691 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d56467d-6815-4f11-9b52-06cd32a818ab-kube-api-access-drvqt" (OuterVolumeSpecName: "kube-api-access-drvqt") pod "9d56467d-6815-4f11-9b52-06cd32a818ab" (UID: "9d56467d-6815-4f11-9b52-06cd32a818ab"). InnerVolumeSpecName "kube-api-access-drvqt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.789291 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.789352 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.789469 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9znn\" (UniqueName: \"kubernetes.io/projected/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-api-access-n9znn\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.789497 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.789569 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d56467d-6815-4f11-9b52-06cd32a818ab-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.789584 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drvqt\" (UniqueName: \"kubernetes.io/projected/9d56467d-6815-4f11-9b52-06cd32a818ab-kube-api-access-drvqt\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.890898 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9znn\" (UniqueName: \"kubernetes.io/projected/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-api-access-n9znn\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.890958 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 
06:56:04.891011 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.891046 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.903520 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.903519 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.903826 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.915886 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9znn\" (UniqueName: \"kubernetes.io/projected/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-api-access-n9znn\") pod \"kube-state-metrics-0\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " pod="openstack/kube-state-metrics-0" Jan 29 06:56:04 crc kubenswrapper[4861]: I0129 06:56:04.945133 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 06:56:05 crc kubenswrapper[4861]: I0129 06:56:05.144536 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f091de47-47eb-4d84-92b8-c478bc6a7af7" path="/var/lib/kubelet/pods/f091de47-47eb-4d84-92b8-c478bc6a7af7/volumes" Jan 29 06:56:05 crc kubenswrapper[4861]: I0129 06:56:05.428090 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 06:56:05 crc kubenswrapper[4861]: I0129 06:56:05.586623 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"953c17ea-50f0-4111-8bc1-16819c1bce47","Type":"ContainerStarted","Data":"1b991737399547fcfaec149607e840c70465d2417b8908a8836f74faabdfdabd"} Jan 29 06:56:05 crc kubenswrapper[4861]: I0129 06:56:05.589108 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-9g56k" Jan 29 06:56:05 crc kubenswrapper[4861]: I0129 06:56:05.765337 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.597536 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"953c17ea-50f0-4111-8bc1-16819c1bce47","Type":"ContainerStarted","Data":"4c604e0c72a77e1a59db1a8f51efca23cb07d1fdc756481cde528fd19e684c10"} Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.597958 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.601471 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c","Type":"ContainerStarted","Data":"36e731d26ef50594863018211c449748243653c97c5abcaa87fb11d34bef3730"} Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.601659 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="ceilometer-central-agent" containerID="cri-o://c8d975e98f20c0efdbc66f93c817ebd7ca8b14ab6cbb58e519d487e8ca7e33cf" gracePeriod=30 Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.601692 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="sg-core" containerID="cri-o://84abed4c06273d14a3e3619785d499f0dd56ff4fa89cada94e808003bbe05ceb" gracePeriod=30 Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.601710 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.601707 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="proxy-httpd" containerID="cri-o://36e731d26ef50594863018211c449748243653c97c5abcaa87fb11d34bef3730" gracePeriod=30 Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.601714 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="ceilometer-notification-agent" containerID="cri-o://4a388b859bae16f5d0166d76130a09463b7c85548aba200b58189d8b5ea112c8" gracePeriod=30 Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.623119 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-nc5kl"] Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.624227 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.633011 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.633096 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.633012 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mlgng" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.639107 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-nc5kl"] Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.717827 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.685150614 podStartE2EDuration="6.717804908s" podCreationTimestamp="2026-01-29 06:56:00 +0000 UTC" firstStartedPulling="2026-01-29 06:56:01.609307409 +0000 UTC m=+1253.280801956" lastFinishedPulling="2026-01-29 06:56:05.641961693 +0000 UTC m=+1257.313456250" observedRunningTime="2026-01-29 06:56:06.700569921 +0000 UTC m=+1258.372064488" watchObservedRunningTime="2026-01-29 06:56:06.717804908 +0000 UTC m=+1258.389299465" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.720615 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9gvq\" (UniqueName: \"kubernetes.io/projected/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-kube-api-access-w9gvq\") pod \"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.720687 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-scripts\") pod \"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.720715 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.720798 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.371446818 podStartE2EDuration="2.720756955s" podCreationTimestamp="2026-01-29 06:56:04 +0000 UTC" firstStartedPulling="2026-01-29 06:56:05.429129769 +0000 UTC m=+1257.100624326" lastFinishedPulling="2026-01-29 06:56:05.778439906 +0000 UTC m=+1257.449934463" observedRunningTime="2026-01-29 06:56:06.649649559 +0000 UTC m=+1258.321144116" watchObservedRunningTime="2026-01-29 06:56:06.720756955 +0000 UTC m=+1258.392251512" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.720852 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-config-data\") pod 
\"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.823116 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9gvq\" (UniqueName: \"kubernetes.io/projected/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-kube-api-access-w9gvq\") pod \"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.823170 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-scripts\") pod \"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.823199 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.823270 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-config-data\") pod \"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.828860 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-config-data\") pod \"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.829346 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-scripts\") pod \"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.831323 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.840871 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9gvq\" (UniqueName: \"kubernetes.io/projected/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-kube-api-access-w9gvq\") pod \"nova-cell0-conductor-db-sync-nc5kl\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:06 crc kubenswrapper[4861]: I0129 06:56:06.983788 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.466399 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-nc5kl"] Jan 29 06:56:07 crc kubenswrapper[4861]: W0129 06:56:07.478676 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd03c3d0_dbd6_487e_90c9_9b58458a7cf2.slice/crio-000d3aae986ed0bf5e2eba1be47a9de7ab4f1633a4b49c4b4253c7e84a8e38e8 WatchSource:0}: Error finding container 000d3aae986ed0bf5e2eba1be47a9de7ab4f1633a4b49c4b4253c7e84a8e38e8: Status 404 returned error can't find the container with id 000d3aae986ed0bf5e2eba1be47a9de7ab4f1633a4b49c4b4253c7e84a8e38e8 Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.612904 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-nc5kl" event={"ID":"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2","Type":"ContainerStarted","Data":"000d3aae986ed0bf5e2eba1be47a9de7ab4f1633a4b49c4b4253c7e84a8e38e8"} Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.616755 4861 generic.go:334] "Generic (PLEG): container finished" podID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerID="36e731d26ef50594863018211c449748243653c97c5abcaa87fb11d34bef3730" exitCode=0 Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.616786 4861 generic.go:334] "Generic (PLEG): container finished" podID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerID="84abed4c06273d14a3e3619785d499f0dd56ff4fa89cada94e808003bbe05ceb" exitCode=2 Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.616796 4861 generic.go:334] "Generic (PLEG): container finished" podID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerID="4a388b859bae16f5d0166d76130a09463b7c85548aba200b58189d8b5ea112c8" exitCode=0 Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.616805 4861 generic.go:334] "Generic (PLEG): container finished" podID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerID="c8d975e98f20c0efdbc66f93c817ebd7ca8b14ab6cbb58e519d487e8ca7e33cf" exitCode=0 Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.616792 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c","Type":"ContainerDied","Data":"36e731d26ef50594863018211c449748243653c97c5abcaa87fb11d34bef3730"} Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.616889 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c","Type":"ContainerDied","Data":"84abed4c06273d14a3e3619785d499f0dd56ff4fa89cada94e808003bbe05ceb"} Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.616902 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c","Type":"ContainerDied","Data":"4a388b859bae16f5d0166d76130a09463b7c85548aba200b58189d8b5ea112c8"} Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.616925 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c","Type":"ContainerDied","Data":"c8d975e98f20c0efdbc66f93c817ebd7ca8b14ab6cbb58e519d487e8ca7e33cf"} Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.758728 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.784911 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.837843 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-run-httpd\") pod \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.837908 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-sg-core-conf-yaml\") pod \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.838004 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5nmb\" (UniqueName: \"kubernetes.io/projected/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-kube-api-access-f5nmb\") pod \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.838172 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-scripts\") pod \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.838232 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-log-httpd\") pod \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.838269 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-config-data\") pod \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.838287 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-combined-ca-bundle\") pod \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\" (UID: \"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c\") " Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.838779 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" (UID: "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.839641 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" (UID: "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.845181 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-scripts" (OuterVolumeSpecName: "scripts") pod "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" (UID: "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.849972 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-kube-api-access-f5nmb" (OuterVolumeSpecName: "kube-api-access-f5nmb") pod "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" (UID: "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c"). InnerVolumeSpecName "kube-api-access-f5nmb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.877566 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" (UID: "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.938961 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" (UID: "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.940053 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.940082 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.940091 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.940100 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.940108 4861 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.940117 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5nmb\" (UniqueName: \"kubernetes.io/projected/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-kube-api-access-f5nmb\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:07 crc kubenswrapper[4861]: I0129 06:56:07.962181 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-config-data" (OuterVolumeSpecName: "config-data") pod "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" (UID: "67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.041939 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.630945 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c","Type":"ContainerDied","Data":"914e583bbdfc9c0edadd7c2275a5f73e2e95ee09d4cdf5bb4de365d36aed8588"} Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.631338 4861 scope.go:117] "RemoveContainer" containerID="36e731d26ef50594863018211c449748243653c97c5abcaa87fb11d34bef3730" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.631240 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.666361 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.682352 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.695509 4861 scope.go:117] "RemoveContainer" containerID="84abed4c06273d14a3e3619785d499f0dd56ff4fa89cada94e808003bbe05ceb" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.704367 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:08 crc kubenswrapper[4861]: E0129 06:56:08.704784 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="proxy-httpd" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.704796 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="proxy-httpd" Jan 29 06:56:08 crc kubenswrapper[4861]: E0129 06:56:08.704809 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="ceilometer-notification-agent" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.704814 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="ceilometer-notification-agent" Jan 29 06:56:08 crc kubenswrapper[4861]: E0129 06:56:08.704833 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="ceilometer-central-agent" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.704839 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="ceilometer-central-agent" Jan 29 06:56:08 crc kubenswrapper[4861]: E0129 06:56:08.704862 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="sg-core" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.704868 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="sg-core" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.705058 4861 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="ceilometer-notification-agent" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.705101 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="ceilometer-central-agent" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.705114 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="proxy-httpd" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.705130 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" containerName="sg-core" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.706842 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.710253 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.711926 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.712114 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.712308 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.752477 4861 scope.go:117] "RemoveContainer" containerID="4a388b859bae16f5d0166d76130a09463b7c85548aba200b58189d8b5ea112c8" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.766908 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-log-httpd\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.766960 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.767008 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.767030 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45ljz\" (UniqueName: \"kubernetes.io/projected/e47eb4f6-0667-4299-8604-63d076967426-kube-api-access-45ljz\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.767145 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-config-data\") pod \"ceilometer-0\" (UID: 
\"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.767212 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-run-httpd\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.767305 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-scripts\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.767342 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.770348 4861 scope.go:117] "RemoveContainer" containerID="c8d975e98f20c0efdbc66f93c817ebd7ca8b14ab6cbb58e519d487e8ca7e33cf" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.869053 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.869114 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45ljz\" (UniqueName: \"kubernetes.io/projected/e47eb4f6-0667-4299-8604-63d076967426-kube-api-access-45ljz\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.869144 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-config-data\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.869186 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-run-httpd\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.869222 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-scripts\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.869244 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.869302 
4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-log-httpd\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.869335 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.870814 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-log-httpd\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.871527 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-run-httpd\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.876631 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.886333 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.886408 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-scripts\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.887387 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-config-data\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.891977 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:08 crc kubenswrapper[4861]: I0129 06:56:08.897765 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45ljz\" (UniqueName: \"kubernetes.io/projected/e47eb4f6-0667-4299-8604-63d076967426-kube-api-access-45ljz\") pod \"ceilometer-0\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") " pod="openstack/ceilometer-0" Jan 29 06:56:09 crc kubenswrapper[4861]: I0129 06:56:09.026841 4861 util.go:30] "No sandbox for pod can be found. 
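[editor's note] The mount sequence above shows the volume reconciler's three-step lifecycle for each of ceilometer-0's eight volumes: VerifyControllerAttachedVolume, then "MountVolume started", then "MountVolume.SetUp succeeded". A minimal sketch that pairs the started/succeeded lines per volume name; the script and its names are illustrative helpers, not part of kubelet, and it assumes the log is fed on stdin:

import re
import sys

# Timestamp after the I/E/W severity marker, e.g. "I0129 06:56:08.869053".
TS = re.compile(r'[IEW]\d{4} (\d\d:\d\d:\d\d\.\d+)')
# The quoted volume names appear as \"log-httpd\" etc. in the raw entries.
START = re.compile(r'operationExecutor\.MountVolume started for volume \\"([^\\]+)\\"')
DONE = re.compile(r'MountVolume\.SetUp succeeded for volume \\"([^\\]+)\\"')

pending = {}
for line in sys.stdin:
    ts = TS.search(line)
    if not ts:
        continue
    m = START.search(line)
    if m:
        pending[m.group(1)] = ts.group(1)
        continue
    m = DONE.search(line)
    if m and m.group(1) in pending:
        print(f"{m.group(1)}: started {pending.pop(m.group(1))}, mounted at {ts.group(1)}")

# Anything left over never reported a SetUp success.
for vol in pending:
    print(f"{vol}: no SetUp success seen", file=sys.stderr)

Run against the entries above, it reports all eight volumes completing within roughly 30 ms of their start lines.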
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:09 crc kubenswrapper[4861]: E0129 06:56:09.128929 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa374fd0_6b03_475d_8230_9fb2a9768091.slice/crio-ddf002d2b21cac7beb20b97fe76c1d0bda80dbc4238c13fb2244922405a64059\": RecentStats: unable to find data in memory cache]" Jan 29 06:56:09 crc kubenswrapper[4861]: I0129 06:56:09.158308 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c" path="/var/lib/kubelet/pods/67406ffa-2bf0-4f63-bff1-4f9f6ec6fa0c/volumes" Jan 29 06:56:09 crc kubenswrapper[4861]: I0129 06:56:09.334217 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:56:09 crc kubenswrapper[4861]: I0129 06:56:09.419123 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-96c87b9b6-8r8w7"] Jan 29 06:56:09 crc kubenswrapper[4861]: I0129 06:56:09.419482 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-96c87b9b6-8r8w7" podUID="e3f66a07-77f8-476d-80bf-f6cc152cfd17" containerName="neutron-httpd" containerID="cri-o://484505ca42014af98f6cbd0a6ac9cd3f6acfa09af05357b188ddd191011787db" gracePeriod=30 Jan 29 06:56:09 crc kubenswrapper[4861]: I0129 06:56:09.419430 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-96c87b9b6-8r8w7" podUID="e3f66a07-77f8-476d-80bf-f6cc152cfd17" containerName="neutron-api" containerID="cri-o://48d76bc12530564c4b2eebc89cc5dfe44093d2a0f960d299491520d1b1bce1ec" gracePeriod=30 Jan 29 06:56:09 crc kubenswrapper[4861]: I0129 06:56:09.560467 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:09 crc kubenswrapper[4861]: I0129 06:56:09.643942 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e47eb4f6-0667-4299-8604-63d076967426","Type":"ContainerStarted","Data":"af3a564c861a7ac12fdd20b02b1e3b5945ecc575003e7ebdafa41e31ce6fe894"} Jan 29 06:56:09 crc kubenswrapper[4861]: I0129 06:56:09.651803 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-96c87b9b6-8r8w7" event={"ID":"e3f66a07-77f8-476d-80bf-f6cc152cfd17","Type":"ContainerDied","Data":"484505ca42014af98f6cbd0a6ac9cd3f6acfa09af05357b188ddd191011787db"} Jan 29 06:56:09 crc kubenswrapper[4861]: I0129 06:56:09.651782 4861 generic.go:334] "Generic (PLEG): container finished" podID="e3f66a07-77f8-476d-80bf-f6cc152cfd17" containerID="484505ca42014af98f6cbd0a6ac9cd3f6acfa09af05357b188ddd191011787db" exitCode=0 Jan 29 06:56:10 crc kubenswrapper[4861]: I0129 06:56:10.664100 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e47eb4f6-0667-4299-8604-63d076967426","Type":"ContainerStarted","Data":"07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7"} Jan 29 06:56:11 crc kubenswrapper[4861]: I0129 06:56:11.206338 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:11 crc kubenswrapper[4861]: I0129 06:56:11.709352 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/glance-default-external-api-0" Jan 29 06:56:11 crc kubenswrapper[4861]: I0129 06:56:11.709404 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 06:56:11 crc kubenswrapper[4861]: I0129 06:56:11.754748 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 06:56:11 crc kubenswrapper[4861]: I0129 06:56:11.758766 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 06:56:11 crc kubenswrapper[4861]: I0129 06:56:11.962296 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:11 crc kubenswrapper[4861]: I0129 06:56:11.962338 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:11 crc kubenswrapper[4861]: I0129 06:56:11.999517 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:12 crc kubenswrapper[4861]: I0129 06:56:12.013310 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:12 crc kubenswrapper[4861]: I0129 06:56:12.680982 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:12 crc kubenswrapper[4861]: I0129 06:56:12.681277 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 06:56:12 crc kubenswrapper[4861]: I0129 06:56:12.681291 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:12 crc kubenswrapper[4861]: I0129 06:56:12.681303 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 06:56:13 crc kubenswrapper[4861]: I0129 06:56:13.695956 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e47eb4f6-0667-4299-8604-63d076967426","Type":"ContainerStarted","Data":"48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9"} Jan 29 06:56:14 crc kubenswrapper[4861]: I0129 06:56:14.704138 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 06:56:14 crc kubenswrapper[4861]: I0129 06:56:14.704439 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 06:56:14 crc kubenswrapper[4861]: I0129 06:56:14.704157 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 06:56:14 crc kubenswrapper[4861]: I0129 06:56:14.704537 4861 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 06:56:14 crc kubenswrapper[4861]: I0129 06:56:14.877282 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:14 crc kubenswrapper[4861]: I0129 06:56:14.935542 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 06:56:14 crc kubenswrapper[4861]: I0129 06:56:14.974624 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 29 06:56:15 crc kubenswrapper[4861]: I0129 06:56:15.035975 4861 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 06:56:15 crc kubenswrapper[4861]: I0129 06:56:15.036027 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 06:56:15 crc kubenswrapper[4861]: I0129 06:56:15.715232 4861 generic.go:334] "Generic (PLEG): container finished" podID="e3f66a07-77f8-476d-80bf-f6cc152cfd17" containerID="48d76bc12530564c4b2eebc89cc5dfe44093d2a0f960d299491520d1b1bce1ec" exitCode=0 Jan 29 06:56:15 crc kubenswrapper[4861]: I0129 06:56:15.716187 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-96c87b9b6-8r8w7" event={"ID":"e3f66a07-77f8-476d-80bf-f6cc152cfd17","Type":"ContainerDied","Data":"48d76bc12530564c4b2eebc89cc5dfe44093d2a0f960d299491520d1b1bce1ec"} Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.502433 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.630934 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-config\") pod \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.631359 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-combined-ca-bundle\") pod \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.631456 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-httpd-config\") pod \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.631578 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4q9ft\" (UniqueName: \"kubernetes.io/projected/e3f66a07-77f8-476d-80bf-f6cc152cfd17-kube-api-access-4q9ft\") pod \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.631609 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-ovndb-tls-certs\") pod \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\" (UID: \"e3f66a07-77f8-476d-80bf-f6cc152cfd17\") " Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.637176 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "e3f66a07-77f8-476d-80bf-f6cc152cfd17" (UID: "e3f66a07-77f8-476d-80bf-f6cc152cfd17"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.639344 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3f66a07-77f8-476d-80bf-f6cc152cfd17-kube-api-access-4q9ft" (OuterVolumeSpecName: "kube-api-access-4q9ft") pod "e3f66a07-77f8-476d-80bf-f6cc152cfd17" (UID: "e3f66a07-77f8-476d-80bf-f6cc152cfd17"). InnerVolumeSpecName "kube-api-access-4q9ft". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.684806 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e3f66a07-77f8-476d-80bf-f6cc152cfd17" (UID: "e3f66a07-77f8-476d-80bf-f6cc152cfd17"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.685717 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-config" (OuterVolumeSpecName: "config") pod "e3f66a07-77f8-476d-80bf-f6cc152cfd17" (UID: "e3f66a07-77f8-476d-80bf-f6cc152cfd17"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.735806 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.735836 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.735848 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.735857 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4q9ft\" (UniqueName: \"kubernetes.io/projected/e3f66a07-77f8-476d-80bf-f6cc152cfd17-kube-api-access-4q9ft\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.754282 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "e3f66a07-77f8-476d-80bf-f6cc152cfd17" (UID: "e3f66a07-77f8-476d-80bf-f6cc152cfd17"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.779139 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-96c87b9b6-8r8w7" event={"ID":"e3f66a07-77f8-476d-80bf-f6cc152cfd17","Type":"ContainerDied","Data":"818682027ce0ed5a7b7fae2e9118020dfbfc79320f2727273da6a82672e866f5"} Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.779189 4861 scope.go:117] "RemoveContainer" containerID="484505ca42014af98f6cbd0a6ac9cd3f6acfa09af05357b188ddd191011787db" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.779343 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-96c87b9b6-8r8w7" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.816146 4861 scope.go:117] "RemoveContainer" containerID="48d76bc12530564c4b2eebc89cc5dfe44093d2a0f960d299491520d1b1bce1ec" Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.818120 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-96c87b9b6-8r8w7"] Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.826665 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-96c87b9b6-8r8w7"] Jan 29 06:56:20 crc kubenswrapper[4861]: I0129 06:56:20.836864 4861 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3f66a07-77f8-476d-80bf-f6cc152cfd17-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:21 crc kubenswrapper[4861]: I0129 06:56:21.127434 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3f66a07-77f8-476d-80bf-f6cc152cfd17" path="/var/lib/kubelet/pods/e3f66a07-77f8-476d-80bf-f6cc152cfd17/volumes" Jan 29 06:56:21 crc kubenswrapper[4861]: I0129 06:56:21.795183 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e47eb4f6-0667-4299-8604-63d076967426","Type":"ContainerStarted","Data":"f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14"} Jan 29 06:56:21 crc kubenswrapper[4861]: I0129 06:56:21.797455 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-nc5kl" event={"ID":"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2","Type":"ContainerStarted","Data":"cebbe28835a9e678bb4753a3aaa2c633f2bc639bbdc370c3518908db12d571a1"} Jan 29 06:56:21 crc kubenswrapper[4861]: I0129 06:56:21.818273 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-nc5kl" podStartSLOduration=2.7833864630000003 podStartE2EDuration="15.818256466s" podCreationTimestamp="2026-01-29 06:56:06 +0000 UTC" firstStartedPulling="2026-01-29 06:56:07.481651625 +0000 UTC m=+1259.153146172" lastFinishedPulling="2026-01-29 06:56:20.516521618 +0000 UTC m=+1272.188016175" observedRunningTime="2026-01-29 06:56:21.811580902 +0000 UTC m=+1273.483075459" watchObservedRunningTime="2026-01-29 06:56:21.818256466 +0000 UTC m=+1273.489751033" Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 06:56:23.830784 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e47eb4f6-0667-4299-8604-63d076967426","Type":"ContainerStarted","Data":"d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3"} Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 06:56:23.831403 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 06:56:23.831097 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="proxy-httpd" containerID="cri-o://d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3" gracePeriod=30 Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 06:56:23.830981 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="ceilometer-central-agent" containerID="cri-o://07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7" gracePeriod=30 Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 
Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 06:56:23.830784 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e47eb4f6-0667-4299-8604-63d076967426","Type":"ContainerStarted","Data":"d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3"}
Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 06:56:23.831403 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 06:56:23.831097 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="proxy-httpd" containerID="cri-o://d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3" gracePeriod=30
Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 06:56:23.830981 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="ceilometer-central-agent" containerID="cri-o://07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7" gracePeriod=30
Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 06:56:23.831179 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="ceilometer-notification-agent" containerID="cri-o://48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9" gracePeriod=30
Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 06:56:23.831175 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="sg-core" containerID="cri-o://f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14" gracePeriod=30
Jan 29 06:56:23 crc kubenswrapper[4861]: I0129 06:56:23.882653 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.775511824 podStartE2EDuration="15.882630071s" podCreationTimestamp="2026-01-29 06:56:08 +0000 UTC" firstStartedPulling="2026-01-29 06:56:09.566261225 +0000 UTC m=+1261.237755782" lastFinishedPulling="2026-01-29 06:56:22.673379462 +0000 UTC m=+1274.344874029" observedRunningTime="2026-01-29 06:56:23.864204832 +0000 UTC m=+1275.535699479" watchObservedRunningTime="2026-01-29 06:56:23.882630071 +0000 UTC m=+1275.554124658"
Jan 29 06:56:24 crc kubenswrapper[4861]: I0129 06:56:24.841176 4861 generic.go:334] "Generic (PLEG): container finished" podID="e47eb4f6-0667-4299-8604-63d076967426" containerID="d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3" exitCode=0
Jan 29 06:56:24 crc kubenswrapper[4861]: I0129 06:56:24.841207 4861 generic.go:334] "Generic (PLEG): container finished" podID="e47eb4f6-0667-4299-8604-63d076967426" containerID="f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14" exitCode=2
Jan 29 06:56:24 crc kubenswrapper[4861]: I0129 06:56:24.841216 4861 generic.go:334] "Generic (PLEG): container finished" podID="e47eb4f6-0667-4299-8604-63d076967426" containerID="07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7" exitCode=0
Jan 29 06:56:24 crc kubenswrapper[4861]: I0129 06:56:24.841235 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e47eb4f6-0667-4299-8604-63d076967426","Type":"ContainerDied","Data":"d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3"}
Jan 29 06:56:24 crc kubenswrapper[4861]: I0129 06:56:24.841270 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e47eb4f6-0667-4299-8604-63d076967426","Type":"ContainerDied","Data":"f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14"}
Jan 29 06:56:24 crc kubenswrapper[4861]: I0129 06:56:24.841282 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e47eb4f6-0667-4299-8604-63d076967426","Type":"ContainerDied","Data":"07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7"}
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.448710 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.619913 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-combined-ca-bundle\") pod \"e47eb4f6-0667-4299-8604-63d076967426\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") "
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.620056 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45ljz\" (UniqueName: \"kubernetes.io/projected/e47eb4f6-0667-4299-8604-63d076967426-kube-api-access-45ljz\") pod \"e47eb4f6-0667-4299-8604-63d076967426\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") "
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.620820 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-ceilometer-tls-certs\") pod \"e47eb4f6-0667-4299-8604-63d076967426\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") "
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.620848 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-scripts\") pod \"e47eb4f6-0667-4299-8604-63d076967426\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") "
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.620886 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-sg-core-conf-yaml\") pod \"e47eb4f6-0667-4299-8604-63d076967426\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") "
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.620922 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-log-httpd\") pod \"e47eb4f6-0667-4299-8604-63d076967426\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") "
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.620948 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-config-data\") pod \"e47eb4f6-0667-4299-8604-63d076967426\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") "
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.620988 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-run-httpd\") pod \"e47eb4f6-0667-4299-8604-63d076967426\" (UID: \"e47eb4f6-0667-4299-8604-63d076967426\") "
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.621379 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e47eb4f6-0667-4299-8604-63d076967426" (UID: "e47eb4f6-0667-4299-8604-63d076967426"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.621498 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e47eb4f6-0667-4299-8604-63d076967426" (UID: "e47eb4f6-0667-4299-8604-63d076967426"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.621888 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.621905 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e47eb4f6-0667-4299-8604-63d076967426-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.632345 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e47eb4f6-0667-4299-8604-63d076967426-kube-api-access-45ljz" (OuterVolumeSpecName: "kube-api-access-45ljz") pod "e47eb4f6-0667-4299-8604-63d076967426" (UID: "e47eb4f6-0667-4299-8604-63d076967426"). InnerVolumeSpecName "kube-api-access-45ljz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.632361 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-scripts" (OuterVolumeSpecName: "scripts") pod "e47eb4f6-0667-4299-8604-63d076967426" (UID: "e47eb4f6-0667-4299-8604-63d076967426"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.654486 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e47eb4f6-0667-4299-8604-63d076967426" (UID: "e47eb4f6-0667-4299-8604-63d076967426"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.704435 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "e47eb4f6-0667-4299-8604-63d076967426" (UID: "e47eb4f6-0667-4299-8604-63d076967426"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.718530 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e47eb4f6-0667-4299-8604-63d076967426" (UID: "e47eb4f6-0667-4299-8604-63d076967426"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.723253 4861 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.723289 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.723302 4861 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.723313 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.723324 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45ljz\" (UniqueName: \"kubernetes.io/projected/e47eb4f6-0667-4299-8604-63d076967426-kube-api-access-45ljz\") on node \"crc\" DevicePath \"\""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.753201 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-config-data" (OuterVolumeSpecName: "config-data") pod "e47eb4f6-0667-4299-8604-63d076967426" (UID: "e47eb4f6-0667-4299-8604-63d076967426"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.825375 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e47eb4f6-0667-4299-8604-63d076967426-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.849673 4861 generic.go:334] "Generic (PLEG): container finished" podID="e47eb4f6-0667-4299-8604-63d076967426" containerID="48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9" exitCode=0
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.849725 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e47eb4f6-0667-4299-8604-63d076967426","Type":"ContainerDied","Data":"48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9"}
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.849765 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e47eb4f6-0667-4299-8604-63d076967426","Type":"ContainerDied","Data":"af3a564c861a7ac12fdd20b02b1e3b5945ecc575003e7ebdafa41e31ce6fe894"}
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.849773 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.849788 4861 scope.go:117] "RemoveContainer" containerID="d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.866634 4861 scope.go:117] "RemoveContainer" containerID="f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.883374 4861 scope.go:117] "RemoveContainer" containerID="48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.885130 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.896302 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.906362 4861 scope.go:117] "RemoveContainer" containerID="07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.917537 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:56:25 crc kubenswrapper[4861]: E0129 06:56:25.917855 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3f66a07-77f8-476d-80bf-f6cc152cfd17" containerName="neutron-api"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.917871 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3f66a07-77f8-476d-80bf-f6cc152cfd17" containerName="neutron-api"
Jan 29 06:56:25 crc kubenswrapper[4861]: E0129 06:56:25.917887 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="proxy-httpd"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.917893 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="proxy-httpd"
Jan 29 06:56:25 crc kubenswrapper[4861]: E0129 06:56:25.917908 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="ceilometer-notification-agent"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.917914 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="ceilometer-notification-agent"
Jan 29 06:56:25 crc kubenswrapper[4861]: E0129 06:56:25.917929 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3f66a07-77f8-476d-80bf-f6cc152cfd17" containerName="neutron-httpd"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.917935 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3f66a07-77f8-476d-80bf-f6cc152cfd17" containerName="neutron-httpd"
Jan 29 06:56:25 crc kubenswrapper[4861]: E0129 06:56:25.917945 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="sg-core"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.917950 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="sg-core"
Jan 29 06:56:25 crc kubenswrapper[4861]: E0129 06:56:25.917962 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="ceilometer-central-agent"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.917969 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="ceilometer-central-agent"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.918131 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="sg-core"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.918144 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="ceilometer-notification-agent"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.918158 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3f66a07-77f8-476d-80bf-f6cc152cfd17" containerName="neutron-httpd"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.918167 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="proxy-httpd"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.918182 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e47eb4f6-0667-4299-8604-63d076967426" containerName="ceilometer-central-agent"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.918193 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3f66a07-77f8-476d-80bf-f6cc152cfd17" containerName="neutron-api"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.919919 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.925797 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.928533 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.935031 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.958247 4861 scope.go:117] "RemoveContainer" containerID="d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.958396 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:56:25 crc kubenswrapper[4861]: E0129 06:56:25.959644 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3\": container with ID starting with d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3 not found: ID does not exist" containerID="d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.959693 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3"} err="failed to get container status \"d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3\": rpc error: code = NotFound desc = could not find container \"d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3\": container with ID starting with d0705706d777891271de6e26df7af70296e7e7853e692db3343a14550580eff3 not found: ID does not exist"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.959730 4861 scope.go:117] "RemoveContainer" containerID="f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14"
Jan 29 06:56:25 crc kubenswrapper[4861]: E0129 06:56:25.963479 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14\": container with ID starting with f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14 not found: ID does not exist" containerID="f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.963533 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14"} err="failed to get container status \"f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14\": rpc error: code = NotFound desc = could not find container \"f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14\": container with ID starting with f273c9205a95e01b4462e4e654b1be3bd0350904246e76e2fc6976451624dc14 not found: ID does not exist"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.963558 4861 scope.go:117] "RemoveContainer" containerID="48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9"
Jan 29 06:56:25 crc kubenswrapper[4861]: E0129 06:56:25.978730 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9\": container with ID starting with 48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9 not found: ID does not exist" containerID="48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.978777 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9"} err="failed to get container status \"48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9\": rpc error: code = NotFound desc = could not find container \"48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9\": container with ID starting with 48b885c4da09b350ed384610791492b2a3c56c61630ec0c3860548b1d41d6cf9 not found: ID does not exist"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.978801 4861 scope.go:117] "RemoveContainer" containerID="07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7"
Jan 29 06:56:25 crc kubenswrapper[4861]: E0129 06:56:25.982331 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7\": container with ID starting with 07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7 not found: ID does not exist" containerID="07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7"
Jan 29 06:56:25 crc kubenswrapper[4861]: I0129 06:56:25.982373 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7"} err="failed to get container status \"07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7\": rpc error: code = NotFound desc = could not find container \"07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7\": container with ID starting with 07e82641f87661bc6c01dfdb2a0c851d1f2b4b4eb19ac490f88709194d2e60a7 not found: ID does not exist"
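[editor's note] Each "RemoveContainer" above is followed by a ContainerStatus lookup that fails with NotFound, which pod_container_deletor then logs as a returned error; this looks like a harmless ordering artifact, with CRI-O having already purged the container kubelet re-queries. A small extractor for the affected IDs; an illustrative stdin-fed helper, not part of kubelet:

import re
import sys

# Collect container IDs whose post-removal status query came back NotFound.
# IDs appear escaped in the raw entries, e.g. \"d0705706...eff3\".
NOTFOUND = re.compile(r'could not find container \\"([0-9a-f]{64})\\"')

seen = set()
for line in sys.stdin:
    m = NOTFOUND.search(line)
    if m:
        seen.add(m.group(1))

for cid in sorted(seen):
    print(cid[:12])  # short form, e.g. d0705706d777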
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.032099 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-scripts\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.032145 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.032185 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.032247 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tf799\" (UniqueName: \"kubernetes.io/projected/45d4f324-a524-4c0f-95d3-e66abb327a0b-kube-api-access-tf799\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.032289 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-config-data\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.032304 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-log-httpd\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.032332 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-run-httpd\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.032361 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.134190 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-run-httpd\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.134248 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.134287 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-scripts\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.134305 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.134332 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.134386 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tf799\" (UniqueName: \"kubernetes.io/projected/45d4f324-a524-4c0f-95d3-e66abb327a0b-kube-api-access-tf799\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.134427 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-config-data\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.134443 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-log-httpd\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.134841 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-log-httpd\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.135102 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-run-httpd\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.140811 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.141006 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.141325 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.141817 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-scripts\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.144283 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-config-data\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.152627 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tf799\" (UniqueName: \"kubernetes.io/projected/45d4f324-a524-4c0f-95d3-e66abb327a0b-kube-api-access-tf799\") pod \"ceilometer-0\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.268290 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.745200 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:56:26 crc kubenswrapper[4861]: W0129 06:56:26.746346 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45d4f324_a524_4c0f_95d3_e66abb327a0b.slice/crio-e449b5d86b5ae2f611bcf7a90ab3d3f9e6ba1ebba85e41cdb15494822423ea14 WatchSource:0}: Error finding container e449b5d86b5ae2f611bcf7a90ab3d3f9e6ba1ebba85e41cdb15494822423ea14: Status 404 returned error can't find the container with id e449b5d86b5ae2f611bcf7a90ab3d3f9e6ba1ebba85e41cdb15494822423ea14
Jan 29 06:56:26 crc kubenswrapper[4861]: I0129 06:56:26.858696 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45d4f324-a524-4c0f-95d3-e66abb327a0b","Type":"ContainerStarted","Data":"e449b5d86b5ae2f611bcf7a90ab3d3f9e6ba1ebba85e41cdb15494822423ea14"}
Jan 29 06:56:27 crc kubenswrapper[4861]: I0129 06:56:27.129047 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e47eb4f6-0667-4299-8604-63d076967426" path="/var/lib/kubelet/pods/e47eb4f6-0667-4299-8604-63d076967426/volumes"
Jan 29 06:56:27 crc kubenswrapper[4861]: I0129 06:56:27.680332 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:56:27 crc kubenswrapper[4861]: I0129 06:56:27.879371 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45d4f324-a524-4c0f-95d3-e66abb327a0b","Type":"ContainerStarted","Data":"83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d"}
Jan 29 06:56:28 crc kubenswrapper[4861]: I0129 06:56:28.888576 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45d4f324-a524-4c0f-95d3-e66abb327a0b","Type":"ContainerStarted","Data":"9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede"}
Jan 29 06:56:28 crc kubenswrapper[4861]: I0129 06:56:28.888865 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45d4f324-a524-4c0f-95d3-e66abb327a0b","Type":"ContainerStarted","Data":"cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af"}
Jan 29 06:56:30 crc kubenswrapper[4861]: I0129 06:56:30.630116 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 06:56:30 crc kubenswrapper[4861]: I0129 06:56:30.630774 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 06:56:30 crc kubenswrapper[4861]: I0129 06:56:30.908753 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45d4f324-a524-4c0f-95d3-e66abb327a0b","Type":"ContainerStarted","Data":"e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828"}
Jan 29 06:56:30 crc kubenswrapper[4861]: I0129 06:56:30.908893 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="ceilometer-central-agent" containerID="cri-o://83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d" gracePeriod=30
Jan 29 06:56:30 crc kubenswrapper[4861]: I0129 06:56:30.908945 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="ceilometer-notification-agent" containerID="cri-o://cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af" gracePeriod=30
Jan 29 06:56:30 crc kubenswrapper[4861]: I0129 06:56:30.908956 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="proxy-httpd" containerID="cri-o://e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828" gracePeriod=30
Jan 29 06:56:30 crc kubenswrapper[4861]: I0129 06:56:30.909032 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="sg-core" containerID="cri-o://9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede" gracePeriod=30
Jan 29 06:56:30 crc kubenswrapper[4861]: I0129 06:56:30.909147 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 29 06:56:30 crc kubenswrapper[4861]: I0129 06:56:30.940664 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.242481971 podStartE2EDuration="5.940646453s" podCreationTimestamp="2026-01-29 06:56:25 +0000 UTC" firstStartedPulling="2026-01-29 06:56:26.748690314 +0000 UTC m=+1278.420184871" lastFinishedPulling="2026-01-29 06:56:30.446854796 +0000 UTC m=+1282.118349353" observedRunningTime="2026-01-29 06:56:30.939161715 +0000 UTC m=+1282.610656282" watchObservedRunningTime="2026-01-29 06:56:30.940646453 +0000 UTC m=+1282.612141010"
Jan 29 06:56:31 crc kubenswrapper[4861]: I0129 06:56:31.950794 4861 generic.go:334] "Generic (PLEG): container finished" podID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerID="e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828" exitCode=0
Jan 29 06:56:31 crc kubenswrapper[4861]: I0129 06:56:31.951285 4861 generic.go:334] "Generic (PLEG): container finished" podID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerID="9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede" exitCode=2
Jan 29 06:56:31 crc kubenswrapper[4861]: I0129 06:56:31.951302 4861 generic.go:334] "Generic (PLEG): container finished" podID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerID="cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af" exitCode=0
Jan 29 06:56:31 crc kubenswrapper[4861]: I0129 06:56:31.950894 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45d4f324-a524-4c0f-95d3-e66abb327a0b","Type":"ContainerDied","Data":"e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828"}
Jan 29 06:56:31 crc kubenswrapper[4861]: I0129 06:56:31.951409 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45d4f324-a524-4c0f-95d3-e66abb327a0b","Type":"ContainerDied","Data":"9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede"}
Jan 29 06:56:31 crc kubenswrapper[4861]: I0129 06:56:31.951424 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45d4f324-a524-4c0f-95d3-e66abb327a0b","Type":"ContainerDied","Data":"cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af"}
Jan 29 06:56:31 crc kubenswrapper[4861]: I0129 06:56:31.954547 4861 generic.go:334] "Generic (PLEG): container finished" podID="fd03c3d0-dbd6-487e-90c9-9b58458a7cf2" containerID="cebbe28835a9e678bb4753a3aaa2c633f2bc639bbdc370c3518908db12d571a1" exitCode=0
Jan 29 06:56:31 crc kubenswrapper[4861]: I0129 06:56:31.954608 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-nc5kl" event={"ID":"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2","Type":"ContainerDied","Data":"cebbe28835a9e678bb4753a3aaa2c633f2bc639bbdc370c3518908db12d571a1"}
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.610003 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9gvq\" (UniqueName: \"kubernetes.io/projected/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-kube-api-access-w9gvq\") pod \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.610104 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-combined-ca-bundle\") pod \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.610191 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-scripts\") pod \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.610322 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-config-data\") pod \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\" (UID: \"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2\") " Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.616782 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-scripts" (OuterVolumeSpecName: "scripts") pod "fd03c3d0-dbd6-487e-90c9-9b58458a7cf2" (UID: "fd03c3d0-dbd6-487e-90c9-9b58458a7cf2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.617495 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-kube-api-access-w9gvq" (OuterVolumeSpecName: "kube-api-access-w9gvq") pod "fd03c3d0-dbd6-487e-90c9-9b58458a7cf2" (UID: "fd03c3d0-dbd6-487e-90c9-9b58458a7cf2"). InnerVolumeSpecName "kube-api-access-w9gvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.644863 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-config-data" (OuterVolumeSpecName: "config-data") pod "fd03c3d0-dbd6-487e-90c9-9b58458a7cf2" (UID: "fd03c3d0-dbd6-487e-90c9-9b58458a7cf2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.658219 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fd03c3d0-dbd6-487e-90c9-9b58458a7cf2" (UID: "fd03c3d0-dbd6-487e-90c9-9b58458a7cf2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.712192 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.712229 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.712239 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.712248 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9gvq\" (UniqueName: \"kubernetes.io/projected/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2-kube-api-access-w9gvq\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.978047 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-nc5kl" event={"ID":"fd03c3d0-dbd6-487e-90c9-9b58458a7cf2","Type":"ContainerDied","Data":"000d3aae986ed0bf5e2eba1be47a9de7ab4f1633a4b49c4b4253c7e84a8e38e8"} Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.978138 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-nc5kl" Jan 29 06:56:33 crc kubenswrapper[4861]: I0129 06:56:33.978148 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="000d3aae986ed0bf5e2eba1be47a9de7ab4f1633a4b49c4b4253c7e84a8e38e8" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.128369 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:56:34 crc kubenswrapper[4861]: E0129 06:56:34.128819 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd03c3d0-dbd6-487e-90c9-9b58458a7cf2" containerName="nova-cell0-conductor-db-sync" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.128840 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd03c3d0-dbd6-487e-90c9-9b58458a7cf2" containerName="nova-cell0-conductor-db-sync" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.129118 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd03c3d0-dbd6-487e-90c9-9b58458a7cf2" containerName="nova-cell0-conductor-db-sync" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.129820 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.133782 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mlgng" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.134850 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.137025 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.325019 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.325086 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6sdp\" (UniqueName: \"kubernetes.io/projected/54546e01-d758-4937-af03-f9dac86690e1-kube-api-access-z6sdp\") pod \"nova-cell0-conductor-0\" (UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.325719 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.427890 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.427936 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6sdp\" (UniqueName: \"kubernetes.io/projected/54546e01-d758-4937-af03-f9dac86690e1-kube-api-access-z6sdp\") pod \"nova-cell0-conductor-0\" (UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.427989 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.439973 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.440119 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.445304 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6sdp\" (UniqueName: \"kubernetes.io/projected/54546e01-d758-4937-af03-f9dac86690e1-kube-api-access-z6sdp\") pod \"nova-cell0-conductor-0\" (UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:34 crc kubenswrapper[4861]: I0129 06:56:34.499055 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:35 crc kubenswrapper[4861]: I0129 06:56:35.000422 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:56:36 crc kubenswrapper[4861]: I0129 06:56:36.015598 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"54546e01-d758-4937-af03-f9dac86690e1","Type":"ContainerStarted","Data":"6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7"} Jan 29 06:56:36 crc kubenswrapper[4861]: I0129 06:56:36.015647 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"54546e01-d758-4937-af03-f9dac86690e1","Type":"ContainerStarted","Data":"5144faa91befd8da0a3d28047b5d7c6c3a594bd63797c23f646a4f1431636cb3"} Jan 29 06:56:36 crc kubenswrapper[4861]: I0129 06:56:36.015749 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:36 crc kubenswrapper[4861]: I0129 06:56:36.034624 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.034610705 podStartE2EDuration="2.034610705s" podCreationTimestamp="2026-01-29 06:56:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:36.031090804 +0000 UTC m=+1287.702585361" watchObservedRunningTime="2026-01-29 06:56:36.034610705 +0000 UTC m=+1287.706105262" Jan 29 06:56:37 crc kubenswrapper[4861]: I0129 06:56:37.939522 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.039849 4861 generic.go:334] "Generic (PLEG): container finished" podID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerID="83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d" exitCode=0 Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.039895 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45d4f324-a524-4c0f-95d3-e66abb327a0b","Type":"ContainerDied","Data":"83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d"} Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.039929 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45d4f324-a524-4c0f-95d3-e66abb327a0b","Type":"ContainerDied","Data":"e449b5d86b5ae2f611bcf7a90ab3d3f9e6ba1ebba85e41cdb15494822423ea14"} Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.039965 4861 scope.go:117] "RemoveContainer" containerID="e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.039965 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.075771 4861 scope.go:117] "RemoveContainer" containerID="9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.095437 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-combined-ca-bundle\") pod \"45d4f324-a524-4c0f-95d3-e66abb327a0b\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.095507 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-config-data\") pod \"45d4f324-a524-4c0f-95d3-e66abb327a0b\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.095532 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-run-httpd\") pod \"45d4f324-a524-4c0f-95d3-e66abb327a0b\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.095577 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-log-httpd\") pod \"45d4f324-a524-4c0f-95d3-e66abb327a0b\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.095612 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-ceilometer-tls-certs\") pod \"45d4f324-a524-4c0f-95d3-e66abb327a0b\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.095709 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-scripts\") pod \"45d4f324-a524-4c0f-95d3-e66abb327a0b\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.095733 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-sg-core-conf-yaml\") pod \"45d4f324-a524-4c0f-95d3-e66abb327a0b\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.095750 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tf799\" (UniqueName: \"kubernetes.io/projected/45d4f324-a524-4c0f-95d3-e66abb327a0b-kube-api-access-tf799\") pod \"45d4f324-a524-4c0f-95d3-e66abb327a0b\" (UID: \"45d4f324-a524-4c0f-95d3-e66abb327a0b\") " Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.102112 4861 scope.go:117] "RemoveContainer" containerID="cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.102711 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "45d4f324-a524-4c0f-95d3-e66abb327a0b" (UID: 
"45d4f324-a524-4c0f-95d3-e66abb327a0b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.102995 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "45d4f324-a524-4c0f-95d3-e66abb327a0b" (UID: "45d4f324-a524-4c0f-95d3-e66abb327a0b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.105567 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45d4f324-a524-4c0f-95d3-e66abb327a0b-kube-api-access-tf799" (OuterVolumeSpecName: "kube-api-access-tf799") pod "45d4f324-a524-4c0f-95d3-e66abb327a0b" (UID: "45d4f324-a524-4c0f-95d3-e66abb327a0b"). InnerVolumeSpecName "kube-api-access-tf799". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.105615 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-scripts" (OuterVolumeSpecName: "scripts") pod "45d4f324-a524-4c0f-95d3-e66abb327a0b" (UID: "45d4f324-a524-4c0f-95d3-e66abb327a0b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.130672 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "45d4f324-a524-4c0f-95d3-e66abb327a0b" (UID: "45d4f324-a524-4c0f-95d3-e66abb327a0b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.155289 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "45d4f324-a524-4c0f-95d3-e66abb327a0b" (UID: "45d4f324-a524-4c0f-95d3-e66abb327a0b"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.197696 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.197733 4861 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.197747 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tf799\" (UniqueName: \"kubernetes.io/projected/45d4f324-a524-4c0f-95d3-e66abb327a0b-kube-api-access-tf799\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.197760 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.197774 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45d4f324-a524-4c0f-95d3-e66abb327a0b-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.197784 4861 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.202398 4861 scope.go:117] "RemoveContainer" containerID="83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.208239 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-config-data" (OuterVolumeSpecName: "config-data") pod "45d4f324-a524-4c0f-95d3-e66abb327a0b" (UID: "45d4f324-a524-4c0f-95d3-e66abb327a0b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.210463 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45d4f324-a524-4c0f-95d3-e66abb327a0b" (UID: "45d4f324-a524-4c0f-95d3-e66abb327a0b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.223723 4861 scope.go:117] "RemoveContainer" containerID="e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828" Jan 29 06:56:38 crc kubenswrapper[4861]: E0129 06:56:38.224219 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828\": container with ID starting with e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828 not found: ID does not exist" containerID="e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.224272 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828"} err="failed to get container status \"e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828\": rpc error: code = NotFound desc = could not find container \"e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828\": container with ID starting with e41a59b2a83c9ef8a38cca37d6038d075294d2c87d7167289ed7e0d9a1fe9828 not found: ID does not exist" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.224304 4861 scope.go:117] "RemoveContainer" containerID="9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede" Jan 29 06:56:38 crc kubenswrapper[4861]: E0129 06:56:38.224768 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede\": container with ID starting with 9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede not found: ID does not exist" containerID="9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.224818 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede"} err="failed to get container status \"9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede\": rpc error: code = NotFound desc = could not find container \"9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede\": container with ID starting with 9d4ae64cf662d64a7d895ac8b9d3120c41793bf7a08de7fcbc9cd1618087cede not found: ID does not exist" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.224849 4861 scope.go:117] "RemoveContainer" containerID="cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af" Jan 29 06:56:38 crc kubenswrapper[4861]: E0129 06:56:38.225259 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af\": container with ID starting with cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af not found: ID does not exist" containerID="cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.225314 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af"} err="failed to get container status \"cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af\": rpc error: code = NotFound desc = could not 
find container \"cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af\": container with ID starting with cf857747645d827147a5986e8fbf5da2d1ab302ccad0e1e069e4c478b0c838af not found: ID does not exist" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.225343 4861 scope.go:117] "RemoveContainer" containerID="83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d" Jan 29 06:56:38 crc kubenswrapper[4861]: E0129 06:56:38.225843 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d\": container with ID starting with 83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d not found: ID does not exist" containerID="83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.225883 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d"} err="failed to get container status \"83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d\": rpc error: code = NotFound desc = could not find container \"83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d\": container with ID starting with 83f347046af2fa71ca82250b8f088bc581bb82ac411243436a5412ebe2153a2d not found: ID does not exist" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.299686 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.299747 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45d4f324-a524-4c0f-95d3-e66abb327a0b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.385820 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.403061 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.431473 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:38 crc kubenswrapper[4861]: E0129 06:56:38.431979 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="ceilometer-central-agent" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.432014 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="ceilometer-central-agent" Jan 29 06:56:38 crc kubenswrapper[4861]: E0129 06:56:38.432043 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="ceilometer-notification-agent" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.432054 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="ceilometer-notification-agent" Jan 29 06:56:38 crc kubenswrapper[4861]: E0129 06:56:38.432069 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="sg-core" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.432105 4861 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="sg-core" Jan 29 06:56:38 crc kubenswrapper[4861]: E0129 06:56:38.432135 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="proxy-httpd" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.432149 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="proxy-httpd" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.432427 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="proxy-httpd" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.432455 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="ceilometer-central-agent" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.432485 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="sg-core" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.432501 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" containerName="ceilometer-notification-agent" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.434695 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.438674 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.438712 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.438901 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.450489 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.605640 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-log-httpd\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.605680 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-run-httpd\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.605707 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.605724 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-combined-ca-bundle\") pod \"ceilometer-0\" (UID: 
\"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.605755 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-scripts\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.605781 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.605801 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-config-data\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.605864 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgmfh\" (UniqueName: \"kubernetes.io/projected/7bec75d5-ed1c-4f93-8769-ac58fe792b70-kube-api-access-rgmfh\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.708035 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-log-httpd\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.708185 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-run-httpd\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.708275 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.708331 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.708428 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-scripts\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.708501 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.708568 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-config-data\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.708768 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgmfh\" (UniqueName: \"kubernetes.io/projected/7bec75d5-ed1c-4f93-8769-ac58fe792b70-kube-api-access-rgmfh\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.708793 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-run-httpd\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.709340 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-log-httpd\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.717446 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.725037 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-config-data\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.725590 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.730169 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.730744 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-scripts\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.733762 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgmfh\" (UniqueName: 
\"kubernetes.io/projected/7bec75d5-ed1c-4f93-8769-ac58fe792b70-kube-api-access-rgmfh\") pod \"ceilometer-0\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " pod="openstack/ceilometer-0" Jan 29 06:56:38 crc kubenswrapper[4861]: I0129 06:56:38.764942 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:39 crc kubenswrapper[4861]: I0129 06:56:39.126587 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45d4f324-a524-4c0f-95d3-e66abb327a0b" path="/var/lib/kubelet/pods/45d4f324-a524-4c0f-95d3-e66abb327a0b/volumes" Jan 29 06:56:39 crc kubenswrapper[4861]: I0129 06:56:39.284405 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:40 crc kubenswrapper[4861]: I0129 06:56:40.060492 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bec75d5-ed1c-4f93-8769-ac58fe792b70","Type":"ContainerStarted","Data":"e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe"} Jan 29 06:56:40 crc kubenswrapper[4861]: I0129 06:56:40.060971 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bec75d5-ed1c-4f93-8769-ac58fe792b70","Type":"ContainerStarted","Data":"f4b2142948b8c6570d14dd563a65be7565cacd9102f00e2083498274845d3107"} Jan 29 06:56:40 crc kubenswrapper[4861]: I0129 06:56:40.096701 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:56:40 crc kubenswrapper[4861]: I0129 06:56:40.096888 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="54546e01-d758-4937-af03-f9dac86690e1" containerName="nova-cell0-conductor-conductor" containerID="cri-o://6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7" gracePeriod=30 Jan 29 06:56:40 crc kubenswrapper[4861]: E0129 06:56:40.102664 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 06:56:40 crc kubenswrapper[4861]: E0129 06:56:40.106914 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 06:56:40 crc kubenswrapper[4861]: E0129 06:56:40.109642 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 06:56:40 crc kubenswrapper[4861]: E0129 06:56:40.109698 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="54546e01-d758-4937-af03-f9dac86690e1" containerName="nova-cell0-conductor-conductor" Jan 29 06:56:40 crc kubenswrapper[4861]: I0129 06:56:40.329894 4861 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:41 crc kubenswrapper[4861]: I0129 06:56:41.103345 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bec75d5-ed1c-4f93-8769-ac58fe792b70","Type":"ContainerStarted","Data":"5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d"} Jan 29 06:56:42 crc kubenswrapper[4861]: I0129 06:56:42.128802 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bec75d5-ed1c-4f93-8769-ac58fe792b70","Type":"ContainerStarted","Data":"78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a"} Jan 29 06:56:42 crc kubenswrapper[4861]: I0129 06:56:42.798049 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:42 crc kubenswrapper[4861]: I0129 06:56:42.992224 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-config-data\") pod \"54546e01-d758-4937-af03-f9dac86690e1\" (UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " Jan 29 06:56:42 crc kubenswrapper[4861]: I0129 06:56:42.992451 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-combined-ca-bundle\") pod \"54546e01-d758-4937-af03-f9dac86690e1\" (UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " Jan 29 06:56:42 crc kubenswrapper[4861]: I0129 06:56:42.992546 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6sdp\" (UniqueName: \"kubernetes.io/projected/54546e01-d758-4937-af03-f9dac86690e1-kube-api-access-z6sdp\") pod \"54546e01-d758-4937-af03-f9dac86690e1\" (UID: \"54546e01-d758-4937-af03-f9dac86690e1\") " Jan 29 06:56:42 crc kubenswrapper[4861]: I0129 06:56:42.997978 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54546e01-d758-4937-af03-f9dac86690e1-kube-api-access-z6sdp" (OuterVolumeSpecName: "kube-api-access-z6sdp") pod "54546e01-d758-4937-af03-f9dac86690e1" (UID: "54546e01-d758-4937-af03-f9dac86690e1"). InnerVolumeSpecName "kube-api-access-z6sdp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.023209 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-config-data" (OuterVolumeSpecName: "config-data") pod "54546e01-d758-4937-af03-f9dac86690e1" (UID: "54546e01-d758-4937-af03-f9dac86690e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.024246 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54546e01-d758-4937-af03-f9dac86690e1" (UID: "54546e01-d758-4937-af03-f9dac86690e1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.100410 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.100446 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54546e01-d758-4937-af03-f9dac86690e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.100458 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6sdp\" (UniqueName: \"kubernetes.io/projected/54546e01-d758-4937-af03-f9dac86690e1-kube-api-access-z6sdp\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.143619 4861 generic.go:334] "Generic (PLEG): container finished" podID="54546e01-d758-4937-af03-f9dac86690e1" containerID="6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7" exitCode=0 Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.143665 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"54546e01-d758-4937-af03-f9dac86690e1","Type":"ContainerDied","Data":"6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7"} Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.143708 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"54546e01-d758-4937-af03-f9dac86690e1","Type":"ContainerDied","Data":"5144faa91befd8da0a3d28047b5d7c6c3a594bd63797c23f646a4f1431636cb3"} Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.143725 4861 scope.go:117] "RemoveContainer" containerID="6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.143878 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.195319 4861 scope.go:117] "RemoveContainer" containerID="6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.195381 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:56:43 crc kubenswrapper[4861]: E0129 06:56:43.196276 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7\": container with ID starting with 6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7 not found: ID does not exist" containerID="6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.196310 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7"} err="failed to get container status \"6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7\": rpc error: code = NotFound desc = could not find container \"6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7\": container with ID starting with 6dbbe3ead53ce0cd84417b4168df784d19b4bb7ea4af7eab4d2ec5cd17ef74b7 not found: ID does not exist" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.222880 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.235019 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:56:43 crc kubenswrapper[4861]: E0129 06:56:43.235473 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54546e01-d758-4937-af03-f9dac86690e1" containerName="nova-cell0-conductor-conductor" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.235497 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="54546e01-d758-4937-af03-f9dac86690e1" containerName="nova-cell0-conductor-conductor" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.235675 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="54546e01-d758-4937-af03-f9dac86690e1" containerName="nova-cell0-conductor-conductor" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.236354 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.240164 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.240355 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mlgng" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.247745 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.304040 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.304376 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsz82\" (UniqueName: \"kubernetes.io/projected/d72b59e5-64c2-4eab-955e-89d6298e834e-kube-api-access-rsz82\") pod \"nova-cell0-conductor-0\" (UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.304435 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.405468 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsz82\" (UniqueName: \"kubernetes.io/projected/d72b59e5-64c2-4eab-955e-89d6298e834e-kube-api-access-rsz82\") pod \"nova-cell0-conductor-0\" (UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.405806 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.405958 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.409633 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.410293 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.429357 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsz82\" (UniqueName: \"kubernetes.io/projected/d72b59e5-64c2-4eab-955e-89d6298e834e-kube-api-access-rsz82\") pod \"nova-cell0-conductor-0\" (UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:43 crc kubenswrapper[4861]: I0129 06:56:43.555178 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:44 crc kubenswrapper[4861]: I0129 06:56:44.078833 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:56:44 crc kubenswrapper[4861]: I0129 06:56:44.153572 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bec75d5-ed1c-4f93-8769-ac58fe792b70","Type":"ContainerStarted","Data":"9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61"} Jan 29 06:56:44 crc kubenswrapper[4861]: I0129 06:56:44.153717 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="ceilometer-central-agent" containerID="cri-o://e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe" gracePeriod=30 Jan 29 06:56:44 crc kubenswrapper[4861]: I0129 06:56:44.154026 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 06:56:44 crc kubenswrapper[4861]: I0129 06:56:44.154333 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="proxy-httpd" containerID="cri-o://9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61" gracePeriod=30 Jan 29 06:56:44 crc kubenswrapper[4861]: I0129 06:56:44.154416 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="sg-core" containerID="cri-o://78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a" gracePeriod=30 Jan 29 06:56:44 crc kubenswrapper[4861]: I0129 06:56:44.154452 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="ceilometer-notification-agent" containerID="cri-o://5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d" gracePeriod=30 Jan 29 06:56:44 crc kubenswrapper[4861]: I0129 06:56:44.156045 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d72b59e5-64c2-4eab-955e-89d6298e834e","Type":"ContainerStarted","Data":"84da739768030e27f5faa0b4a707ce4784ad756c7694fd1b62ad8eb84653921e"} Jan 29 06:56:44 crc kubenswrapper[4861]: I0129 06:56:44.182182 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.44944982 podStartE2EDuration="6.182166019s" podCreationTimestamp="2026-01-29 06:56:38 +0000 UTC" firstStartedPulling="2026-01-29 06:56:39.299031499 +0000 UTC m=+1290.970526066" lastFinishedPulling="2026-01-29 06:56:43.031747708 +0000 UTC m=+1294.703242265" observedRunningTime="2026-01-29 06:56:44.177407176 +0000 UTC m=+1295.848901743" watchObservedRunningTime="2026-01-29 
06:56:44.182166019 +0000 UTC m=+1295.853660576" Jan 29 06:56:45 crc kubenswrapper[4861]: I0129 06:56:45.144579 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54546e01-d758-4937-af03-f9dac86690e1" path="/var/lib/kubelet/pods/54546e01-d758-4937-af03-f9dac86690e1/volumes" Jan 29 06:56:45 crc kubenswrapper[4861]: I0129 06:56:45.171699 4861 generic.go:334] "Generic (PLEG): container finished" podID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerID="9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61" exitCode=0 Jan 29 06:56:45 crc kubenswrapper[4861]: I0129 06:56:45.171748 4861 generic.go:334] "Generic (PLEG): container finished" podID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerID="78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a" exitCode=2 Jan 29 06:56:45 crc kubenswrapper[4861]: I0129 06:56:45.171761 4861 generic.go:334] "Generic (PLEG): container finished" podID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerID="5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d" exitCode=0 Jan 29 06:56:45 crc kubenswrapper[4861]: I0129 06:56:45.171815 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bec75d5-ed1c-4f93-8769-ac58fe792b70","Type":"ContainerDied","Data":"9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61"} Jan 29 06:56:45 crc kubenswrapper[4861]: I0129 06:56:45.171852 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bec75d5-ed1c-4f93-8769-ac58fe792b70","Type":"ContainerDied","Data":"78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a"} Jan 29 06:56:45 crc kubenswrapper[4861]: I0129 06:56:45.171871 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bec75d5-ed1c-4f93-8769-ac58fe792b70","Type":"ContainerDied","Data":"5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d"} Jan 29 06:56:45 crc kubenswrapper[4861]: I0129 06:56:45.173897 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d72b59e5-64c2-4eab-955e-89d6298e834e","Type":"ContainerStarted","Data":"6756abe8a1b0340d3aa8c881cb242ac677f7ceda5ed061f5774b08d04e550a63"} Jan 29 06:56:45 crc kubenswrapper[4861]: I0129 06:56:45.175449 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:45 crc kubenswrapper[4861]: I0129 06:56:45.205645 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.205623105 podStartE2EDuration="2.205623105s" podCreationTimestamp="2026-01-29 06:56:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:45.198598283 +0000 UTC m=+1296.870092850" watchObservedRunningTime="2026-01-29 06:56:45.205623105 +0000 UTC m=+1296.877117672" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.762623 4861 util.go:48] "No ready sandbox for pod can be found. 
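
[annotation] The ceilometer-0 teardown above is the kubelet's standard two-phase stop: each container gets a termination signal and a 30-second grace window (gracePeriod=30), with a forced kill only if it overruns. Here all four containers exit well inside the window; proxy-httpd and the two agents report exitCode=0, sg-core exitCode=2, and the central agent follows a few seconds later. A minimal sketch of the same terminate-then-kill pattern for a local process (illustrative only, not kubelet code):

    import subprocess

    def stop_with_grace(proc, grace_seconds=30.0):
        """Two-phase stop: signal, wait up to grace_seconds, then hard-kill.
        Mirrors the gracePeriod=30 kills logged above, for a plain local
        process rather than a CRI container."""
        proc.terminate()                      # polite stop request (SIGTERM)
        try:
            return proc.wait(timeout=grace_seconds)
        except subprocess.TimeoutExpired:
            proc.kill()                       # hard stop (SIGKILL)
            return proc.wait()                # negative value = died by signal

    p = subprocess.Popen(["sleep", "300"])
    print("exit status:", stop_with_grace(p, grace_seconds=2.0))
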
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.891495 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-scripts\") pod \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.891557 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-config-data\") pod \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.891580 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-combined-ca-bundle\") pod \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.891630 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-log-httpd\") pod \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.891647 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-ceilometer-tls-certs\") pod \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.891672 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgmfh\" (UniqueName: \"kubernetes.io/projected/7bec75d5-ed1c-4f93-8769-ac58fe792b70-kube-api-access-rgmfh\") pod \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.891760 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-run-httpd\") pod \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.892184 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7bec75d5-ed1c-4f93-8769-ac58fe792b70" (UID: "7bec75d5-ed1c-4f93-8769-ac58fe792b70"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.892376 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7bec75d5-ed1c-4f93-8769-ac58fe792b70" (UID: "7bec75d5-ed1c-4f93-8769-ac58fe792b70"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.892457 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-sg-core-conf-yaml\") pod \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\" (UID: \"7bec75d5-ed1c-4f93-8769-ac58fe792b70\") " Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.892812 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.892829 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bec75d5-ed1c-4f93-8769-ac58fe792b70-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.897326 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bec75d5-ed1c-4f93-8769-ac58fe792b70-kube-api-access-rgmfh" (OuterVolumeSpecName: "kube-api-access-rgmfh") pod "7bec75d5-ed1c-4f93-8769-ac58fe792b70" (UID: "7bec75d5-ed1c-4f93-8769-ac58fe792b70"). InnerVolumeSpecName "kube-api-access-rgmfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.903212 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-scripts" (OuterVolumeSpecName: "scripts") pod "7bec75d5-ed1c-4f93-8769-ac58fe792b70" (UID: "7bec75d5-ed1c-4f93-8769-ac58fe792b70"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.945250 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7bec75d5-ed1c-4f93-8769-ac58fe792b70" (UID: "7bec75d5-ed1c-4f93-8769-ac58fe792b70"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.960675 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "7bec75d5-ed1c-4f93-8769-ac58fe792b70" (UID: "7bec75d5-ed1c-4f93-8769-ac58fe792b70"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.989624 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7bec75d5-ed1c-4f93-8769-ac58fe792b70" (UID: "7bec75d5-ed1c-4f93-8769-ac58fe792b70"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.994717 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.994744 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.994755 4861 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.994765 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgmfh\" (UniqueName: \"kubernetes.io/projected/7bec75d5-ed1c-4f93-8769-ac58fe792b70-kube-api-access-rgmfh\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:47 crc kubenswrapper[4861]: I0129 06:56:47.994774 4861 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.007719 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-config-data" (OuterVolumeSpecName: "config-data") pod "7bec75d5-ed1c-4f93-8769-ac58fe792b70" (UID: "7bec75d5-ed1c-4f93-8769-ac58fe792b70"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.096790 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bec75d5-ed1c-4f93-8769-ac58fe792b70-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.204399 4861 generic.go:334] "Generic (PLEG): container finished" podID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerID="e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe" exitCode=0 Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.204435 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bec75d5-ed1c-4f93-8769-ac58fe792b70","Type":"ContainerDied","Data":"e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe"} Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.204460 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bec75d5-ed1c-4f93-8769-ac58fe792b70","Type":"ContainerDied","Data":"f4b2142948b8c6570d14dd563a65be7565cacd9102f00e2083498274845d3107"} Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.204475 4861 scope.go:117] "RemoveContainer" containerID="9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.204555 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.227595 4861 scope.go:117] "RemoveContainer" containerID="78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.253029 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.268162 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.269262 4861 scope.go:117] "RemoveContainer" containerID="5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.281986 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:48 crc kubenswrapper[4861]: E0129 06:56:48.282442 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="ceilometer-central-agent" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.282463 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="ceilometer-central-agent" Jan 29 06:56:48 crc kubenswrapper[4861]: E0129 06:56:48.282497 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="proxy-httpd" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.282506 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="proxy-httpd" Jan 29 06:56:48 crc kubenswrapper[4861]: E0129 06:56:48.282523 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="sg-core" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.282533 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="sg-core" Jan 29 06:56:48 crc kubenswrapper[4861]: E0129 06:56:48.282544 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="ceilometer-notification-agent" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.282552 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="ceilometer-notification-agent" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.282757 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="ceilometer-notification-agent" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.282797 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="ceilometer-central-agent" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.282810 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="sg-core" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.282824 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" containerName="proxy-httpd" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.284746 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.288231 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.288751 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.289415 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.298774 4861 scope.go:117] "RemoveContainer" containerID="e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.307220 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.330648 4861 scope.go:117] "RemoveContainer" containerID="9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61" Jan 29 06:56:48 crc kubenswrapper[4861]: E0129 06:56:48.331401 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61\": container with ID starting with 9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61 not found: ID does not exist" containerID="9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.331449 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61"} err="failed to get container status \"9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61\": rpc error: code = NotFound desc = could not find container \"9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61\": container with ID starting with 9c5edc61f4fd8e9a02b3ddefd49b1ca7645dd54456e102556edd5cae2559ba61 not found: ID does not exist" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.331477 4861 scope.go:117] "RemoveContainer" containerID="78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a" Jan 29 06:56:48 crc kubenswrapper[4861]: E0129 06:56:48.332024 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a\": container with ID starting with 78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a not found: ID does not exist" containerID="78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.332161 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a"} err="failed to get container status \"78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a\": rpc error: code = NotFound desc = could not find container \"78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a\": container with ID starting with 78c00d6135d15a8fb3e8b6a253466c23f4d593db0b8d510bf76647140e017f0a not found: ID does not exist" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.332208 4861 scope.go:117] "RemoveContainer" containerID="5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d" Jan 29 06:56:48 
crc kubenswrapper[4861]: E0129 06:56:48.332559 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d\": container with ID starting with 5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d not found: ID does not exist" containerID="5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.332598 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d"} err="failed to get container status \"5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d\": rpc error: code = NotFound desc = could not find container \"5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d\": container with ID starting with 5aef1b7b213b1c5a690fe1fe197153c01fd990cbf2efa0f445bc6444628ee81d not found: ID does not exist" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.332626 4861 scope.go:117] "RemoveContainer" containerID="e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe" Jan 29 06:56:48 crc kubenswrapper[4861]: E0129 06:56:48.334342 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe\": container with ID starting with e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe not found: ID does not exist" containerID="e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.334397 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe"} err="failed to get container status \"e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe\": rpc error: code = NotFound desc = could not find container \"e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe\": container with ID starting with e46ee371663e7a6901e086e151cfbfb391d07f112044b140ec85404e7ebe4bbe not found: ID does not exist" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.401482 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-log-httpd\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.401554 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-run-httpd\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.401766 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.401883 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
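
[annotation] The four "ContainerStatus from runtime service failed ... NotFound" round-trips above are the benign tail of cleanup: scope.go issues RemoveContainer, the runtime deletes the container, and a follow-up status query then finds nothing, which pod_container_deletor.go logs and moves past. Treating "already gone" as success is the usual idempotent-deletion pattern; a self-contained sketch with a fake runtime standing in for CRI-O:

    class NotFoundError(Exception):
        """Stand-in for the runtime's rpc 'code = NotFound' above."""

    class FakeRuntime:
        def __init__(self, ids):
            self.ids = set(ids)
        def remove(self, cid):
            if cid not in self.ids:
                raise NotFoundError(cid)
            self.ids.discard(cid)

    def remove_container(runtime, cid):
        """Delete a container; 'already gone' counts as success, so retries
        and races with a completed delete are harmless."""
        try:
            runtime.remove(cid)
        except NotFoundError:
            print(f"container {cid[:12]} already removed; ignoring")

    rt = FakeRuntime({"9c5edc61f4fd"})
    remove_container(rt, "9c5edc61f4fd")   # deletes
    remove_container(rt, "9c5edc61f4fd")   # second call is a no-op
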
\"kube-api-access-7ts7b\" (UniqueName: \"kubernetes.io/projected/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-kube-api-access-7ts7b\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.402063 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-config-data\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.402136 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.402184 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-scripts\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.402436 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.503812 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-log-httpd\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.503902 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-run-httpd\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.503979 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.504025 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ts7b\" (UniqueName: \"kubernetes.io/projected/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-kube-api-access-7ts7b\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.504253 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-config-data\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.504293 
4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.504323 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-scripts\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.504428 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.505747 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-run-httpd\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.506113 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-log-httpd\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.512262 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-scripts\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.513416 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.513612 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.514723 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.516002 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-config-data\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.539497 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-7ts7b\" (UniqueName: \"kubernetes.io/projected/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-kube-api-access-7ts7b\") pod \"ceilometer-0\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " pod="openstack/ceilometer-0" Jan 29 06:56:48 crc kubenswrapper[4861]: I0129 06:56:48.611668 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:56:49 crc kubenswrapper[4861]: I0129 06:56:49.090468 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:56:49 crc kubenswrapper[4861]: W0129 06:56:49.095731 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod740f35ca_86a8_45d8_86d2_4cbc1ca7e148.slice/crio-398e930f0f9679b8ac4568171cffffbc2bb1ae8279ef5f7e7c2dcde6d6d6455e WatchSource:0}: Error finding container 398e930f0f9679b8ac4568171cffffbc2bb1ae8279ef5f7e7c2dcde6d6d6455e: Status 404 returned error can't find the container with id 398e930f0f9679b8ac4568171cffffbc2bb1ae8279ef5f7e7c2dcde6d6d6455e Jan 29 06:56:49 crc kubenswrapper[4861]: I0129 06:56:49.142109 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bec75d5-ed1c-4f93-8769-ac58fe792b70" path="/var/lib/kubelet/pods/7bec75d5-ed1c-4f93-8769-ac58fe792b70/volumes" Jan 29 06:56:49 crc kubenswrapper[4861]: I0129 06:56:49.214398 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"740f35ca-86a8-45d8-86d2-4cbc1ca7e148","Type":"ContainerStarted","Data":"398e930f0f9679b8ac4568171cffffbc2bb1ae8279ef5f7e7c2dcde6d6d6455e"} Jan 29 06:56:50 crc kubenswrapper[4861]: I0129 06:56:50.226036 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"740f35ca-86a8-45d8-86d2-4cbc1ca7e148","Type":"ContainerStarted","Data":"c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c"} Jan 29 06:56:51 crc kubenswrapper[4861]: I0129 06:56:51.238757 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"740f35ca-86a8-45d8-86d2-4cbc1ca7e148","Type":"ContainerStarted","Data":"ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb"} Jan 29 06:56:51 crc kubenswrapper[4861]: I0129 06:56:51.239350 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"740f35ca-86a8-45d8-86d2-4cbc1ca7e148","Type":"ContainerStarted","Data":"18f72f9d87c3567b7dfd90a93627a9d7abddd58b54226f615a15371e12048613"} Jan 29 06:56:53 crc kubenswrapper[4861]: I0129 06:56:53.274451 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"740f35ca-86a8-45d8-86d2-4cbc1ca7e148","Type":"ContainerStarted","Data":"918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a"} Jan 29 06:56:53 crc kubenswrapper[4861]: I0129 06:56:53.275254 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 06:56:53 crc kubenswrapper[4861]: I0129 06:56:53.315622 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.8614705040000001 podStartE2EDuration="5.315581472s" podCreationTimestamp="2026-01-29 06:56:48 +0000 UTC" firstStartedPulling="2026-01-29 06:56:49.098996264 +0000 UTC m=+1300.770490861" lastFinishedPulling="2026-01-29 06:56:52.553107262 +0000 UTC m=+1304.224601829" observedRunningTime="2026-01-29 06:56:53.308506359 +0000 UTC m=+1304.980000956" 
watchObservedRunningTime="2026-01-29 06:56:53.315581472 +0000 UTC m=+1304.987076109" Jan 29 06:56:53 crc kubenswrapper[4861]: I0129 06:56:53.594668 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.162250 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-zbm2j"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.164822 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.167533 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.168179 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.203322 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-zbm2j"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.325625 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-scripts\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.325725 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-config-data\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.325771 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.325794 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpgzq\" (UniqueName: \"kubernetes.io/projected/7e8fdf87-ff09-46c0-b508-f8f01e57290e-kube-api-access-gpgzq\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.326577 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.328205 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.332093 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.345175 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.362735 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.377135 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.380584 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.389138 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.402649 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.404037 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.407934 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.426771 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.427697 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.427734 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-config-data\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.427784 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-scripts\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.427876 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-config-data\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.427935 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1079014-dbe9-4626-9fab-cde23e57636e-logs\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc 
kubenswrapper[4861]: I0129 06:56:54.427954 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.427976 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpgzq\" (UniqueName: \"kubernetes.io/projected/7e8fdf87-ff09-46c0-b508-f8f01e57290e-kube-api-access-gpgzq\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.428051 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fg6jt\" (UniqueName: \"kubernetes.io/projected/d1079014-dbe9-4626-9fab-cde23e57636e-kube-api-access-fg6jt\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.441417 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.450565 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-scripts\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.452671 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-config-data\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.460215 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpgzq\" (UniqueName: \"kubernetes.io/projected/7e8fdf87-ff09-46c0-b508-f8f01e57290e-kube-api-access-gpgzq\") pod \"nova-cell0-cell-mapping-zbm2j\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.527620 4861 util.go:30] "No sandbox for pod can be found. 
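
[annotation] Volume traffic for the five new nova pods interleaves freely in the SyncLoop, but each (pod, volume) pair still steps through VerifyControllerAttachedVolume, then "MountVolume started", then "MountVolume.SetUp succeeded", as kube-api-access-gpgzq does above at .325794, .427976 and .460215. A rough validator of that per-pair ordering, assuming lines shaped like the ones in this excerpt:

    import re

    PHASES = ["VerifyControllerAttachedVolume started",
              "MountVolume started",
              "MountVolume.SetUp succeeded"]
    # volume name right after 'for volume', pod at the trailing pod="..." field
    PAT = re.compile(r'for volume \\?"(?P<vol>[^"\\]+).*pod="(?P<pod>[^"]+)"')

    def check_mount_order(lines):
        last = {}                           # (pod, volume) -> highest phase seen
        for line in lines:
            for idx, phase in enumerate(PHASES):
                if phase in line:
                    m = PAT.search(line)
                    if m:
                        key = (m.group("pod"), m.group("vol"))
                        if idx < last.get(key, -1):
                            raise ValueError(f"{key}: {phase!r} out of order")
                        last[key] = idx
                    break
        return last
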
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.530991 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.531053 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.531123 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fg6jt\" (UniqueName: \"kubernetes.io/projected/d1079014-dbe9-4626-9fab-cde23e57636e-kube-api-access-fg6jt\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.531145 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-config-data\") pod \"nova-scheduler-0\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " pod="openstack/nova-scheduler-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.531179 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.531199 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-config-data\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.531215 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " pod="openstack/nova-scheduler-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.531233 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njw4f\" (UniqueName: \"kubernetes.io/projected/196bdecd-ee6f-4dc0-9d5b-a591ce688792-kube-api-access-njw4f\") pod \"nova-scheduler-0\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " pod="openstack/nova-scheduler-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.531309 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzprp\" (UniqueName: \"kubernetes.io/projected/b7d2bf83-0528-4d71-9700-73548000f02c-kube-api-access-dzprp\") pod \"nova-cell1-novncproxy-0\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 
06:56:54.531333 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1079014-dbe9-4626-9fab-cde23e57636e-logs\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.532107 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1079014-dbe9-4626-9fab-cde23e57636e-logs\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.542691 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.543948 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-config-data\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.568897 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fg6jt\" (UniqueName: \"kubernetes.io/projected/d1079014-dbe9-4626-9fab-cde23e57636e-kube-api-access-fg6jt\") pod \"nova-api-0\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.574733 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.576643 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.596266 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.633038 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " pod="openstack/nova-scheduler-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.633092 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njw4f\" (UniqueName: \"kubernetes.io/projected/196bdecd-ee6f-4dc0-9d5b-a591ce688792-kube-api-access-njw4f\") pod \"nova-scheduler-0\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " pod="openstack/nova-scheduler-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.633161 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzprp\" (UniqueName: \"kubernetes.io/projected/b7d2bf83-0528-4d71-9700-73548000f02c-kube-api-access-dzprp\") pod \"nova-cell1-novncproxy-0\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.633194 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.633233 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.633276 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-config-data\") pod \"nova-scheduler-0\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " pod="openstack/nova-scheduler-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.642262 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " pod="openstack/nova-scheduler-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.644154 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.659608 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-config-data\") pod \"nova-scheduler-0\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " pod="openstack/nova-scheduler-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.674403 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.710143 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.711384 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.715832 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzprp\" (UniqueName: \"kubernetes.io/projected/b7d2bf83-0528-4d71-9700-73548000f02c-kube-api-access-dzprp\") pod \"nova-cell1-novncproxy-0\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.722687 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njw4f\" (UniqueName: \"kubernetes.io/projected/196bdecd-ee6f-4dc0-9d5b-a591ce688792-kube-api-access-njw4f\") pod \"nova-scheduler-0\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " pod="openstack/nova-scheduler-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.734789 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsmb6\" (UniqueName: \"kubernetes.io/projected/400c0df4-8462-42aa-92e1-2c8d5e0b1099-kube-api-access-fsmb6\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.734864 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-config-data\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.734908 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/400c0df4-8462-42aa-92e1-2c8d5e0b1099-logs\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.735159 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.769126 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-grc22"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.770623 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.799912 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-grc22"] Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.840061 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-nb\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.840129 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-swift-storage-0\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.840190 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k56d8\" (UniqueName: \"kubernetes.io/projected/4caf0ea1-b9a5-478e-be1d-d7c48d185348-kube-api-access-k56d8\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.840217 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-svc\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.840239 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.840264 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-config\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.840297 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsmb6\" (UniqueName: \"kubernetes.io/projected/400c0df4-8462-42aa-92e1-2c8d5e0b1099-kube-api-access-fsmb6\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.840324 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-sb\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.840376 4861 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-config-data\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.840400 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/400c0df4-8462-42aa-92e1-2c8d5e0b1099-logs\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.840773 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/400c0df4-8462-42aa-92e1-2c8d5e0b1099-logs\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.844830 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.849611 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-config-data\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.874891 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fsmb6\" (UniqueName: \"kubernetes.io/projected/400c0df4-8462-42aa-92e1-2c8d5e0b1099-kube-api-access-fsmb6\") pod \"nova-metadata-0\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " pod="openstack/nova-metadata-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.932321 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.942358 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-sb\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.942466 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-nb\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.942491 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-swift-storage-0\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.942544 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k56d8\" (UniqueName: \"kubernetes.io/projected/4caf0ea1-b9a5-478e-be1d-d7c48d185348-kube-api-access-k56d8\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.942567 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-svc\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.942595 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-config\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.943496 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-config\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.944012 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-sb\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.947615 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-svc\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 
06:56:54.947102 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-swift-storage-0\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.951500 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-nb\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:54 crc kubenswrapper[4861]: I0129 06:56:54.974685 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k56d8\" (UniqueName: \"kubernetes.io/projected/4caf0ea1-b9a5-478e-be1d-d7c48d185348-kube-api-access-k56d8\") pod \"dnsmasq-dns-647df7b8c5-grc22\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") " pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.000131 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.049980 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.105915 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.346033 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-zbm2j"] Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.376145 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.531253 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zrv47"] Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.532360 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.542536 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.542821 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.547378 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zrv47"] Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.560102 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.560164 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rjd6\" (UniqueName: \"kubernetes.io/projected/8ee543aa-5e49-42f8-85c3-50911ec59b03-kube-api-access-4rjd6\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.560210 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-config-data\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.560279 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-scripts\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.618237 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.627384 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:56:55 crc kubenswrapper[4861]: W0129 06:56:55.642540 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod196bdecd_ee6f_4dc0_9d5b_a591ce688792.slice/crio-5e5cb260be37368e8ba67082f462a7334a0ce56af960a9bcc5ea40ce4787c0fb WatchSource:0}: Error finding container 5e5cb260be37368e8ba67082f462a7334a0ce56af960a9bcc5ea40ce4787c0fb: Status 404 returned error can't find the container with id 5e5cb260be37368e8ba67082f462a7334a0ce56af960a9bcc5ea40ce4787c0fb Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.661819 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-scripts\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 
06:56:55.661913 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.661955 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rjd6\" (UniqueName: \"kubernetes.io/projected/8ee543aa-5e49-42f8-85c3-50911ec59b03-kube-api-access-4rjd6\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.662000 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-config-data\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.666010 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-config-data\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.669716 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-scripts\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.670639 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.685754 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rjd6\" (UniqueName: \"kubernetes.io/projected/8ee543aa-5e49-42f8-85c3-50911ec59b03-kube-api-access-4rjd6\") pod \"nova-cell1-conductor-db-sync-zrv47\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.731417 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.762624 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:56:55 crc kubenswrapper[4861]: I0129 06:56:55.764359 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-grc22"] Jan 29 06:56:56 crc kubenswrapper[4861]: W0129 06:56:56.225283 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ee543aa_5e49_42f8_85c3_50911ec59b03.slice/crio-4f81a98eea54824f5e21c41f062aaf7b01e221c254e87c95fa5afc0925e8847d WatchSource:0}: Error finding container 4f81a98eea54824f5e21c41f062aaf7b01e221c254e87c95fa5afc0925e8847d: Status 404 returned error can't find the container with id 4f81a98eea54824f5e21c41f062aaf7b01e221c254e87c95fa5afc0925e8847d Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.228411 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zrv47"] Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.313437 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"400c0df4-8462-42aa-92e1-2c8d5e0b1099","Type":"ContainerStarted","Data":"634844f507dee49fc97cf0862a231602caedeb2f69eb1b70d964e3ba6848fe42"} Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.314773 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d1079014-dbe9-4626-9fab-cde23e57636e","Type":"ContainerStarted","Data":"0015da60c82109c244eb9867f3dda46b4585f8b13483f37fb11b8de6ba695638"} Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.316103 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b7d2bf83-0528-4d71-9700-73548000f02c","Type":"ContainerStarted","Data":"e4824901064fa4cddd04d94cd1fd7a98ce896dd96ad636ab50a1687b9e290630"} Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.319570 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-zbm2j" event={"ID":"7e8fdf87-ff09-46c0-b508-f8f01e57290e","Type":"ContainerStarted","Data":"331e3c80a98fd0724c50967f39eed912e8c3b62cc52bed54eea4c135e30f8704"} Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.319616 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-zbm2j" event={"ID":"7e8fdf87-ff09-46c0-b508-f8f01e57290e","Type":"ContainerStarted","Data":"fb937d22989ac7e897bf854f5aed316ac6d0bf5a5cd5b7af2d59911078186bf1"} Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.321784 4861 generic.go:334] "Generic (PLEG): container finished" podID="4caf0ea1-b9a5-478e-be1d-d7c48d185348" containerID="4b5361eb4f97b1f0c0a43cdfc8bba3497eb615870aae7ed31b56818124abc7f5" exitCode=0 Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.321855 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647df7b8c5-grc22" event={"ID":"4caf0ea1-b9a5-478e-be1d-d7c48d185348","Type":"ContainerDied","Data":"4b5361eb4f97b1f0c0a43cdfc8bba3497eb615870aae7ed31b56818124abc7f5"} Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.321882 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647df7b8c5-grc22" event={"ID":"4caf0ea1-b9a5-478e-be1d-d7c48d185348","Type":"ContainerStarted","Data":"d2ab362d7d883a0d2be0f882d4dc1077f3ef5edba4f31502784aab2243aca5fe"} Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.324390 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"196bdecd-ee6f-4dc0-9d5b-a591ce688792","Type":"ContainerStarted","Data":"5e5cb260be37368e8ba67082f462a7334a0ce56af960a9bcc5ea40ce4787c0fb"} Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.329793 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-zrv47" event={"ID":"8ee543aa-5e49-42f8-85c3-50911ec59b03","Type":"ContainerStarted","Data":"4f81a98eea54824f5e21c41f062aaf7b01e221c254e87c95fa5afc0925e8847d"} Jan 29 06:56:56 crc kubenswrapper[4861]: I0129 06:56:56.341246 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-zbm2j" podStartSLOduration=2.341221689 podStartE2EDuration="2.341221689s" podCreationTimestamp="2026-01-29 06:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:56.338985211 +0000 UTC m=+1308.010479768" watchObservedRunningTime="2026-01-29 06:56:56.341221689 +0000 UTC m=+1308.012716266" Jan 29 06:56:57 crc kubenswrapper[4861]: I0129 06:56:57.349153 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647df7b8c5-grc22" event={"ID":"4caf0ea1-b9a5-478e-be1d-d7c48d185348","Type":"ContainerStarted","Data":"1e34d07e14aab00e720ca02ec07f36951679f3fedc5ba47bee1c985e8b91e13e"} Jan 29 06:56:57 crc kubenswrapper[4861]: I0129 06:56:57.349732 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:56:57 crc kubenswrapper[4861]: I0129 06:56:57.354822 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-zrv47" event={"ID":"8ee543aa-5e49-42f8-85c3-50911ec59b03","Type":"ContainerStarted","Data":"2f518cab156f62fbd5298f70524670678e1c98100d3ecb6f8936540f50df39da"} Jan 29 06:56:57 crc kubenswrapper[4861]: I0129 06:56:57.381578 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-647df7b8c5-grc22" podStartSLOduration=3.381560422 podStartE2EDuration="3.381560422s" podCreationTimestamp="2026-01-29 06:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:57.37612062 +0000 UTC m=+1309.047615197" watchObservedRunningTime="2026-01-29 06:56:57.381560422 +0000 UTC m=+1309.053054979" Jan 29 06:56:57 crc kubenswrapper[4861]: I0129 06:56:57.408791 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-zrv47" podStartSLOduration=2.408772378 podStartE2EDuration="2.408772378s" podCreationTimestamp="2026-01-29 06:56:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:56:57.396296574 +0000 UTC m=+1309.067791141" watchObservedRunningTime="2026-01-29 06:56:57.408772378 +0000 UTC m=+1309.080266935" Jan 29 06:56:58 crc kubenswrapper[4861]: I0129 06:56:58.684586 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:56:58 crc kubenswrapper[4861]: I0129 06:56:58.724090 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.393695 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"196bdecd-ee6f-4dc0-9d5b-a591ce688792","Type":"ContainerStarted","Data":"8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1"} Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.395687 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"400c0df4-8462-42aa-92e1-2c8d5e0b1099","Type":"ContainerStarted","Data":"3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9"} Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.395728 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"400c0df4-8462-42aa-92e1-2c8d5e0b1099","Type":"ContainerStarted","Data":"ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab"} Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.395778 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="400c0df4-8462-42aa-92e1-2c8d5e0b1099" containerName="nova-metadata-log" containerID="cri-o://ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab" gracePeriod=30 Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.395791 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="400c0df4-8462-42aa-92e1-2c8d5e0b1099" containerName="nova-metadata-metadata" containerID="cri-o://3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9" gracePeriod=30 Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.399172 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d1079014-dbe9-4626-9fab-cde23e57636e","Type":"ContainerStarted","Data":"bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a"} Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.399221 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d1079014-dbe9-4626-9fab-cde23e57636e","Type":"ContainerStarted","Data":"5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7"} Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.402588 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b7d2bf83-0528-4d71-9700-73548000f02c","Type":"ContainerStarted","Data":"2578f711dc2641e8fba1a7313e4c5059d379b749e689eba9c1b05fac506fd409"} Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.402754 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="b7d2bf83-0528-4d71-9700-73548000f02c" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://2578f711dc2641e8fba1a7313e4c5059d379b749e689eba9c1b05fac506fd409" gracePeriod=30 Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.411312 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.885722831 podStartE2EDuration="6.411292513s" podCreationTimestamp="2026-01-29 06:56:54 +0000 UTC" firstStartedPulling="2026-01-29 06:56:55.644566286 +0000 UTC m=+1307.316060843" lastFinishedPulling="2026-01-29 06:56:59.170135958 +0000 UTC m=+1310.841630525" observedRunningTime="2026-01-29 06:57:00.40999014 +0000 UTC m=+1312.081484687" watchObservedRunningTime="2026-01-29 06:57:00.411292513 +0000 UTC m=+1312.082787060" Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.435539 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.656028279 
podStartE2EDuration="6.435521542s" podCreationTimestamp="2026-01-29 06:56:54 +0000 UTC" firstStartedPulling="2026-01-29 06:56:55.381250841 +0000 UTC m=+1307.052745398" lastFinishedPulling="2026-01-29 06:56:59.160744094 +0000 UTC m=+1310.832238661" observedRunningTime="2026-01-29 06:57:00.428787428 +0000 UTC m=+1312.100282005" watchObservedRunningTime="2026-01-29 06:57:00.435521542 +0000 UTC m=+1312.107016099" Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.445622 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.890949296 podStartE2EDuration="6.445608214s" podCreationTimestamp="2026-01-29 06:56:54 +0000 UTC" firstStartedPulling="2026-01-29 06:56:55.616391694 +0000 UTC m=+1307.287886251" lastFinishedPulling="2026-01-29 06:56:59.171050612 +0000 UTC m=+1310.842545169" observedRunningTime="2026-01-29 06:57:00.441437166 +0000 UTC m=+1312.112931733" watchObservedRunningTime="2026-01-29 06:57:00.445608214 +0000 UTC m=+1312.117102761" Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.467453 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.027298497 podStartE2EDuration="6.467433601s" podCreationTimestamp="2026-01-29 06:56:54 +0000 UTC" firstStartedPulling="2026-01-29 06:56:55.730284561 +0000 UTC m=+1307.401779118" lastFinishedPulling="2026-01-29 06:56:59.170419625 +0000 UTC m=+1310.841914222" observedRunningTime="2026-01-29 06:57:00.461081546 +0000 UTC m=+1312.132576123" watchObservedRunningTime="2026-01-29 06:57:00.467433601 +0000 UTC m=+1312.138928158" Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.630013 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:57:00 crc kubenswrapper[4861]: I0129 06:57:00.630103 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.123812 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.208318 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/400c0df4-8462-42aa-92e1-2c8d5e0b1099-logs\") pod \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.208523 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-combined-ca-bundle\") pod \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.208569 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-config-data\") pod \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.208643 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fsmb6\" (UniqueName: \"kubernetes.io/projected/400c0df4-8462-42aa-92e1-2c8d5e0b1099-kube-api-access-fsmb6\") pod \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\" (UID: \"400c0df4-8462-42aa-92e1-2c8d5e0b1099\") " Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.209219 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/400c0df4-8462-42aa-92e1-2c8d5e0b1099-logs" (OuterVolumeSpecName: "logs") pod "400c0df4-8462-42aa-92e1-2c8d5e0b1099" (UID: "400c0df4-8462-42aa-92e1-2c8d5e0b1099"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.210274 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/400c0df4-8462-42aa-92e1-2c8d5e0b1099-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.214093 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/400c0df4-8462-42aa-92e1-2c8d5e0b1099-kube-api-access-fsmb6" (OuterVolumeSpecName: "kube-api-access-fsmb6") pod "400c0df4-8462-42aa-92e1-2c8d5e0b1099" (UID: "400c0df4-8462-42aa-92e1-2c8d5e0b1099"). InnerVolumeSpecName "kube-api-access-fsmb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.241276 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-config-data" (OuterVolumeSpecName: "config-data") pod "400c0df4-8462-42aa-92e1-2c8d5e0b1099" (UID: "400c0df4-8462-42aa-92e1-2c8d5e0b1099"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.253686 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "400c0df4-8462-42aa-92e1-2c8d5e0b1099" (UID: "400c0df4-8462-42aa-92e1-2c8d5e0b1099"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.312214 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.312254 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/400c0df4-8462-42aa-92e1-2c8d5e0b1099-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.312267 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fsmb6\" (UniqueName: \"kubernetes.io/projected/400c0df4-8462-42aa-92e1-2c8d5e0b1099-kube-api-access-fsmb6\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.416959 4861 generic.go:334] "Generic (PLEG): container finished" podID="400c0df4-8462-42aa-92e1-2c8d5e0b1099" containerID="3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9" exitCode=0 Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.416999 4861 generic.go:334] "Generic (PLEG): container finished" podID="400c0df4-8462-42aa-92e1-2c8d5e0b1099" containerID="ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab" exitCode=143 Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.417051 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.417050 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"400c0df4-8462-42aa-92e1-2c8d5e0b1099","Type":"ContainerDied","Data":"3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9"} Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.417155 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"400c0df4-8462-42aa-92e1-2c8d5e0b1099","Type":"ContainerDied","Data":"ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab"} Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.417218 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"400c0df4-8462-42aa-92e1-2c8d5e0b1099","Type":"ContainerDied","Data":"634844f507dee49fc97cf0862a231602caedeb2f69eb1b70d964e3ba6848fe42"} Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.417246 4861 scope.go:117] "RemoveContainer" containerID="3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.458897 4861 scope.go:117] "RemoveContainer" containerID="ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.473829 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.502293 4861 scope.go:117] "RemoveContainer" containerID="3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9" Jan 29 06:57:01 crc kubenswrapper[4861]: E0129 06:57:01.503910 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9\": container with ID starting with 3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9 not found: ID does not exist" 
containerID="3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.503949 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9"} err="failed to get container status \"3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9\": rpc error: code = NotFound desc = could not find container \"3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9\": container with ID starting with 3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9 not found: ID does not exist" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.503973 4861 scope.go:117] "RemoveContainer" containerID="ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab" Jan 29 06:57:01 crc kubenswrapper[4861]: E0129 06:57:01.506630 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab\": container with ID starting with ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab not found: ID does not exist" containerID="ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.506676 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab"} err="failed to get container status \"ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab\": rpc error: code = NotFound desc = could not find container \"ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab\": container with ID starting with ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab not found: ID does not exist" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.506704 4861 scope.go:117] "RemoveContainer" containerID="3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.506804 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.507196 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9"} err="failed to get container status \"3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9\": rpc error: code = NotFound desc = could not find container \"3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9\": container with ID starting with 3000236422c206e1e83e7ed7fec0cbb58cbac1ce5b6d413274472d4b300fbad9 not found: ID does not exist" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.507266 4861 scope.go:117] "RemoveContainer" containerID="ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.507922 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab"} err="failed to get container status \"ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab\": rpc error: code = NotFound desc = could not find container \"ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab\": container with ID starting with 
ea9bc8323422a3d97e4cd644d9d9457f93c4a8584abcda279303fa40a1fa13ab not found: ID does not exist" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.537986 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:01 crc kubenswrapper[4861]: E0129 06:57:01.538733 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="400c0df4-8462-42aa-92e1-2c8d5e0b1099" containerName="nova-metadata-metadata" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.538758 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="400c0df4-8462-42aa-92e1-2c8d5e0b1099" containerName="nova-metadata-metadata" Jan 29 06:57:01 crc kubenswrapper[4861]: E0129 06:57:01.538800 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="400c0df4-8462-42aa-92e1-2c8d5e0b1099" containerName="nova-metadata-log" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.538807 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="400c0df4-8462-42aa-92e1-2c8d5e0b1099" containerName="nova-metadata-log" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.538999 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="400c0df4-8462-42aa-92e1-2c8d5e0b1099" containerName="nova-metadata-log" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.539030 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="400c0df4-8462-42aa-92e1-2c8d5e0b1099" containerName="nova-metadata-metadata" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.540014 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.543152 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.555215 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.558230 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.617482 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.617673 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.617789 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkv7r\" (UniqueName: \"kubernetes.io/projected/0e999579-6c0a-44cc-b329-7dbabc461fe4-kube-api-access-pkv7r\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.617864 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-config-data\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.617890 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e999579-6c0a-44cc-b329-7dbabc461fe4-logs\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.720428 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.720753 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.720901 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkv7r\" (UniqueName: \"kubernetes.io/projected/0e999579-6c0a-44cc-b329-7dbabc461fe4-kube-api-access-pkv7r\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.721052 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-config-data\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.721357 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e999579-6c0a-44cc-b329-7dbabc461fe4-logs\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.721700 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e999579-6c0a-44cc-b329-7dbabc461fe4-logs\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.724441 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.734577 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.734739 4861 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-config-data\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.747297 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkv7r\" (UniqueName: \"kubernetes.io/projected/0e999579-6c0a-44cc-b329-7dbabc461fe4-kube-api-access-pkv7r\") pod \"nova-metadata-0\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " pod="openstack/nova-metadata-0" Jan 29 06:57:01 crc kubenswrapper[4861]: I0129 06:57:01.860724 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:02 crc kubenswrapper[4861]: I0129 06:57:02.306311 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:02 crc kubenswrapper[4861]: I0129 06:57:02.432914 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0e999579-6c0a-44cc-b329-7dbabc461fe4","Type":"ContainerStarted","Data":"fee686ca8bf3212e7b7b52cb92d697325dc0b2eea12a8493d77111bb78b11d9c"} Jan 29 06:57:03 crc kubenswrapper[4861]: I0129 06:57:03.138856 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="400c0df4-8462-42aa-92e1-2c8d5e0b1099" path="/var/lib/kubelet/pods/400c0df4-8462-42aa-92e1-2c8d5e0b1099/volumes" Jan 29 06:57:03 crc kubenswrapper[4861]: I0129 06:57:03.448500 4861 generic.go:334] "Generic (PLEG): container finished" podID="7e8fdf87-ff09-46c0-b508-f8f01e57290e" containerID="331e3c80a98fd0724c50967f39eed912e8c3b62cc52bed54eea4c135e30f8704" exitCode=0 Jan 29 06:57:03 crc kubenswrapper[4861]: I0129 06:57:03.448624 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-zbm2j" event={"ID":"7e8fdf87-ff09-46c0-b508-f8f01e57290e","Type":"ContainerDied","Data":"331e3c80a98fd0724c50967f39eed912e8c3b62cc52bed54eea4c135e30f8704"} Jan 29 06:57:03 crc kubenswrapper[4861]: I0129 06:57:03.451899 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0e999579-6c0a-44cc-b329-7dbabc461fe4","Type":"ContainerStarted","Data":"a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb"} Jan 29 06:57:03 crc kubenswrapper[4861]: I0129 06:57:03.451966 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0e999579-6c0a-44cc-b329-7dbabc461fe4","Type":"ContainerStarted","Data":"68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a"} Jan 29 06:57:03 crc kubenswrapper[4861]: I0129 06:57:03.498986 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.49895317 podStartE2EDuration="2.49895317s" podCreationTimestamp="2026-01-29 06:57:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:03.496033124 +0000 UTC m=+1315.167527751" watchObservedRunningTime="2026-01-29 06:57:03.49895317 +0000 UTC m=+1315.170447767" Jan 29 06:57:04 crc kubenswrapper[4861]: I0129 06:57:04.466996 4861 generic.go:334] "Generic (PLEG): container finished" podID="8ee543aa-5e49-42f8-85c3-50911ec59b03" containerID="2f518cab156f62fbd5298f70524670678e1c98100d3ecb6f8936540f50df39da" exitCode=0 Jan 29 06:57:04 crc kubenswrapper[4861]: I0129 06:57:04.467133 4861 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/nova-cell1-conductor-db-sync-zrv47" event={"ID":"8ee543aa-5e49-42f8-85c3-50911ec59b03","Type":"ContainerDied","Data":"2f518cab156f62fbd5298f70524670678e1c98100d3ecb6f8936540f50df39da"} Jan 29 06:57:04 crc kubenswrapper[4861]: I0129 06:57:04.675184 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 06:57:04 crc kubenswrapper[4861]: I0129 06:57:04.675293 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 06:57:04 crc kubenswrapper[4861]: I0129 06:57:04.924673 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:57:04 crc kubenswrapper[4861]: I0129 06:57:04.933674 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.000988 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.001040 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.011554 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-config-data\") pod \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.011701 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpgzq\" (UniqueName: \"kubernetes.io/projected/7e8fdf87-ff09-46c0-b508-f8f01e57290e-kube-api-access-gpgzq\") pod \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.011872 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-combined-ca-bundle\") pod \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.011909 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-scripts\") pod \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\" (UID: \"7e8fdf87-ff09-46c0-b508-f8f01e57290e\") " Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.021313 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-scripts" (OuterVolumeSpecName: "scripts") pod "7e8fdf87-ff09-46c0-b508-f8f01e57290e" (UID: "7e8fdf87-ff09-46c0-b508-f8f01e57290e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.021408 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e8fdf87-ff09-46c0-b508-f8f01e57290e-kube-api-access-gpgzq" (OuterVolumeSpecName: "kube-api-access-gpgzq") pod "7e8fdf87-ff09-46c0-b508-f8f01e57290e" (UID: "7e8fdf87-ff09-46c0-b508-f8f01e57290e"). InnerVolumeSpecName "kube-api-access-gpgzq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.036350 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.053664 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-config-data" (OuterVolumeSpecName: "config-data") pod "7e8fdf87-ff09-46c0-b508-f8f01e57290e" (UID: "7e8fdf87-ff09-46c0-b508-f8f01e57290e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.077894 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e8fdf87-ff09-46c0-b508-f8f01e57290e" (UID: "7e8fdf87-ff09-46c0-b508-f8f01e57290e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.108696 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-647df7b8c5-grc22" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.115460 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.115505 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.115515 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e8fdf87-ff09-46c0-b508-f8f01e57290e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.115523 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpgzq\" (UniqueName: \"kubernetes.io/projected/7e8fdf87-ff09-46c0-b508-f8f01e57290e-kube-api-access-gpgzq\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.199442 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-b5m5w"] Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.199815 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" podUID="4e650b78-7110-40bc-b59e-fdaea7ecc619" containerName="dnsmasq-dns" containerID="cri-o://5fe9c53e0fcd2cd94a63cb0cd318a1c2b1d8f53237727d05a34a0f21c8837e14" gracePeriod=10 Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.481532 4861 generic.go:334] "Generic (PLEG): container finished" podID="4e650b78-7110-40bc-b59e-fdaea7ecc619" containerID="5fe9c53e0fcd2cd94a63cb0cd318a1c2b1d8f53237727d05a34a0f21c8837e14" exitCode=0 Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.481595 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" event={"ID":"4e650b78-7110-40bc-b59e-fdaea7ecc619","Type":"ContainerDied","Data":"5fe9c53e0fcd2cd94a63cb0cd318a1c2b1d8f53237727d05a34a0f21c8837e14"} Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.484357 4861 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-zbm2j" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.484758 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-zbm2j" event={"ID":"7e8fdf87-ff09-46c0-b508-f8f01e57290e","Type":"ContainerDied","Data":"fb937d22989ac7e897bf854f5aed316ac6d0bf5a5cd5b7af2d59911078186bf1"} Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.484777 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb937d22989ac7e897bf854f5aed316ac6d0bf5a5cd5b7af2d59911078186bf1" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.542313 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.603548 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.662980 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.663192 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d1079014-dbe9-4626-9fab-cde23e57636e" containerName="nova-api-log" containerID="cri-o://5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7" gracePeriod=30 Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.663594 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d1079014-dbe9-4626-9fab-cde23e57636e" containerName="nova-api-api" containerID="cri-o://bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a" gracePeriod=30 Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.671914 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d1079014-dbe9-4626-9fab-cde23e57636e" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": EOF" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.671933 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d1079014-dbe9-4626-9fab-cde23e57636e" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": EOF" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.673616 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.673842 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0e999579-6c0a-44cc-b329-7dbabc461fe4" containerName="nova-metadata-log" containerID="cri-o://68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a" gracePeriod=30 Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.674240 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0e999579-6c0a-44cc-b329-7dbabc461fe4" containerName="nova-metadata-metadata" containerID="cri-o://a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb" gracePeriod=30 Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.734916 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-svc\") pod \"4e650b78-7110-40bc-b59e-fdaea7ecc619\" (UID: 
\"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.735265 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-config\") pod \"4e650b78-7110-40bc-b59e-fdaea7ecc619\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.735332 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7x5w\" (UniqueName: \"kubernetes.io/projected/4e650b78-7110-40bc-b59e-fdaea7ecc619-kube-api-access-m7x5w\") pod \"4e650b78-7110-40bc-b59e-fdaea7ecc619\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.735365 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-sb\") pod \"4e650b78-7110-40bc-b59e-fdaea7ecc619\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.735385 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-swift-storage-0\") pod \"4e650b78-7110-40bc-b59e-fdaea7ecc619\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.735470 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-nb\") pod \"4e650b78-7110-40bc-b59e-fdaea7ecc619\" (UID: \"4e650b78-7110-40bc-b59e-fdaea7ecc619\") " Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.749801 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e650b78-7110-40bc-b59e-fdaea7ecc619-kube-api-access-m7x5w" (OuterVolumeSpecName: "kube-api-access-m7x5w") pod "4e650b78-7110-40bc-b59e-fdaea7ecc619" (UID: "4e650b78-7110-40bc-b59e-fdaea7ecc619"). InnerVolumeSpecName "kube-api-access-m7x5w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.825007 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4e650b78-7110-40bc-b59e-fdaea7ecc619" (UID: "4e650b78-7110-40bc-b59e-fdaea7ecc619"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.827581 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4e650b78-7110-40bc-b59e-fdaea7ecc619" (UID: "4e650b78-7110-40bc-b59e-fdaea7ecc619"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.828710 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4e650b78-7110-40bc-b59e-fdaea7ecc619" (UID: "4e650b78-7110-40bc-b59e-fdaea7ecc619"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.837205 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7x5w\" (UniqueName: \"kubernetes.io/projected/4e650b78-7110-40bc-b59e-fdaea7ecc619-kube-api-access-m7x5w\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.837227 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.837236 4861 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.837246 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.845325 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4e650b78-7110-40bc-b59e-fdaea7ecc619" (UID: "4e650b78-7110-40bc-b59e-fdaea7ecc619"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.851702 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-config" (OuterVolumeSpecName: "config") pod "4e650b78-7110-40bc-b59e-fdaea7ecc619" (UID: "4e650b78-7110-40bc-b59e-fdaea7ecc619"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.954195 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.954234 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e650b78-7110-40bc-b59e-fdaea7ecc619-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:05 crc kubenswrapper[4861]: I0129 06:57:05.993461 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.102753 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.160497 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rjd6\" (UniqueName: \"kubernetes.io/projected/8ee543aa-5e49-42f8-85c3-50911ec59b03-kube-api-access-4rjd6\") pod \"8ee543aa-5e49-42f8-85c3-50911ec59b03\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.160658 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-combined-ca-bundle\") pod \"8ee543aa-5e49-42f8-85c3-50911ec59b03\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.160789 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-config-data\") pod \"8ee543aa-5e49-42f8-85c3-50911ec59b03\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.160826 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-scripts\") pod \"8ee543aa-5e49-42f8-85c3-50911ec59b03\" (UID: \"8ee543aa-5e49-42f8-85c3-50911ec59b03\") " Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.163600 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ee543aa-5e49-42f8-85c3-50911ec59b03-kube-api-access-4rjd6" (OuterVolumeSpecName: "kube-api-access-4rjd6") pod "8ee543aa-5e49-42f8-85c3-50911ec59b03" (UID: "8ee543aa-5e49-42f8-85c3-50911ec59b03"). InnerVolumeSpecName "kube-api-access-4rjd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.163828 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-scripts" (OuterVolumeSpecName: "scripts") pod "8ee543aa-5e49-42f8-85c3-50911ec59b03" (UID: "8ee543aa-5e49-42f8-85c3-50911ec59b03"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.192578 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ee543aa-5e49-42f8-85c3-50911ec59b03" (UID: "8ee543aa-5e49-42f8-85c3-50911ec59b03"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.195895 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-config-data" (OuterVolumeSpecName: "config-data") pod "8ee543aa-5e49-42f8-85c3-50911ec59b03" (UID: "8ee543aa-5e49-42f8-85c3-50911ec59b03"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.262949 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.262980 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.262990 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rjd6\" (UniqueName: \"kubernetes.io/projected/8ee543aa-5e49-42f8-85c3-50911ec59b03-kube-api-access-4rjd6\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.263000 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ee543aa-5e49-42f8-85c3-50911ec59b03-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.355041 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.465109 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-config-data\") pod \"0e999579-6c0a-44cc-b329-7dbabc461fe4\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.465196 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkv7r\" (UniqueName: \"kubernetes.io/projected/0e999579-6c0a-44cc-b329-7dbabc461fe4-kube-api-access-pkv7r\") pod \"0e999579-6c0a-44cc-b329-7dbabc461fe4\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.465382 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e999579-6c0a-44cc-b329-7dbabc461fe4-logs\") pod \"0e999579-6c0a-44cc-b329-7dbabc461fe4\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.465715 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e999579-6c0a-44cc-b329-7dbabc461fe4-logs" (OuterVolumeSpecName: "logs") pod "0e999579-6c0a-44cc-b329-7dbabc461fe4" (UID: "0e999579-6c0a-44cc-b329-7dbabc461fe4"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.465802 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-nova-metadata-tls-certs\") pod \"0e999579-6c0a-44cc-b329-7dbabc461fe4\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.465836 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-combined-ca-bundle\") pod \"0e999579-6c0a-44cc-b329-7dbabc461fe4\" (UID: \"0e999579-6c0a-44cc-b329-7dbabc461fe4\") " Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.466185 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e999579-6c0a-44cc-b329-7dbabc461fe4-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.468819 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e999579-6c0a-44cc-b329-7dbabc461fe4-kube-api-access-pkv7r" (OuterVolumeSpecName: "kube-api-access-pkv7r") pod "0e999579-6c0a-44cc-b329-7dbabc461fe4" (UID: "0e999579-6c0a-44cc-b329-7dbabc461fe4"). InnerVolumeSpecName "kube-api-access-pkv7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.495902 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-config-data" (OuterVolumeSpecName: "config-data") pod "0e999579-6c0a-44cc-b329-7dbabc461fe4" (UID: "0e999579-6c0a-44cc-b329-7dbabc461fe4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.501265 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e999579-6c0a-44cc-b329-7dbabc461fe4" (UID: "0e999579-6c0a-44cc-b329-7dbabc461fe4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.503329 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zrv47" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.503302 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-zrv47" event={"ID":"8ee543aa-5e49-42f8-85c3-50911ec59b03","Type":"ContainerDied","Data":"4f81a98eea54824f5e21c41f062aaf7b01e221c254e87c95fa5afc0925e8847d"} Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.503471 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4f81a98eea54824f5e21c41f062aaf7b01e221c254e87c95fa5afc0925e8847d" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.506534 4861 generic.go:334] "Generic (PLEG): container finished" podID="d1079014-dbe9-4626-9fab-cde23e57636e" containerID="5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7" exitCode=143 Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.506606 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d1079014-dbe9-4626-9fab-cde23e57636e","Type":"ContainerDied","Data":"5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7"} Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.513054 4861 generic.go:334] "Generic (PLEG): container finished" podID="0e999579-6c0a-44cc-b329-7dbabc461fe4" containerID="a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb" exitCode=0 Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.513095 4861 generic.go:334] "Generic (PLEG): container finished" podID="0e999579-6c0a-44cc-b329-7dbabc461fe4" containerID="68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a" exitCode=143 Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.513146 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0e999579-6c0a-44cc-b329-7dbabc461fe4","Type":"ContainerDied","Data":"a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb"} Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.513172 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0e999579-6c0a-44cc-b329-7dbabc461fe4","Type":"ContainerDied","Data":"68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a"} Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.513184 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0e999579-6c0a-44cc-b329-7dbabc461fe4","Type":"ContainerDied","Data":"fee686ca8bf3212e7b7b52cb92d697325dc0b2eea12a8493d77111bb78b11d9c"} Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.513198 4861 scope.go:117] "RemoveContainer" containerID="a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.513311 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.517621 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" event={"ID":"4e650b78-7110-40bc-b59e-fdaea7ecc619","Type":"ContainerDied","Data":"f50725bc115507278db4f3b8d7af78365597fc1c209e3b81f486c538dfe8188b"} Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.520316 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75dbb546bf-b5m5w" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.546653 4861 scope.go:117] "RemoveContainer" containerID="68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.562422 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 06:57:06 crc kubenswrapper[4861]: E0129 06:57:06.562812 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ee543aa-5e49-42f8-85c3-50911ec59b03" containerName="nova-cell1-conductor-db-sync" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.562833 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ee543aa-5e49-42f8-85c3-50911ec59b03" containerName="nova-cell1-conductor-db-sync" Jan 29 06:57:06 crc kubenswrapper[4861]: E0129 06:57:06.562853 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e999579-6c0a-44cc-b329-7dbabc461fe4" containerName="nova-metadata-log" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.562859 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e999579-6c0a-44cc-b329-7dbabc461fe4" containerName="nova-metadata-log" Jan 29 06:57:06 crc kubenswrapper[4861]: E0129 06:57:06.562873 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e650b78-7110-40bc-b59e-fdaea7ecc619" containerName="dnsmasq-dns" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.562879 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e650b78-7110-40bc-b59e-fdaea7ecc619" containerName="dnsmasq-dns" Jan 29 06:57:06 crc kubenswrapper[4861]: E0129 06:57:06.562890 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e999579-6c0a-44cc-b329-7dbabc461fe4" containerName="nova-metadata-metadata" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.562896 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e999579-6c0a-44cc-b329-7dbabc461fe4" containerName="nova-metadata-metadata" Jan 29 06:57:06 crc kubenswrapper[4861]: E0129 06:57:06.562911 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e8fdf87-ff09-46c0-b508-f8f01e57290e" containerName="nova-manage" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.562917 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e8fdf87-ff09-46c0-b508-f8f01e57290e" containerName="nova-manage" Jan 29 06:57:06 crc kubenswrapper[4861]: E0129 06:57:06.562929 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e650b78-7110-40bc-b59e-fdaea7ecc619" containerName="init" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.562935 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e650b78-7110-40bc-b59e-fdaea7ecc619" containerName="init" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.563108 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e999579-6c0a-44cc-b329-7dbabc461fe4" containerName="nova-metadata-log" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.563119 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e999579-6c0a-44cc-b329-7dbabc461fe4" containerName="nova-metadata-metadata" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.563131 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e8fdf87-ff09-46c0-b508-f8f01e57290e" containerName="nova-manage" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.563139 4861 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="8ee543aa-5e49-42f8-85c3-50911ec59b03" containerName="nova-cell1-conductor-db-sync" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.563154 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e650b78-7110-40bc-b59e-fdaea7ecc619" containerName="dnsmasq-dns" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.563713 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.572231 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.573266 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.573299 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.573312 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkv7r\" (UniqueName: \"kubernetes.io/projected/0e999579-6c0a-44cc-b329-7dbabc461fe4-kube-api-access-pkv7r\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.576419 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "0e999579-6c0a-44cc-b329-7dbabc461fe4" (UID: "0e999579-6c0a-44cc-b329-7dbabc461fe4"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.591339 4861 scope.go:117] "RemoveContainer" containerID="a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb" Jan 29 06:57:06 crc kubenswrapper[4861]: E0129 06:57:06.594167 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb\": container with ID starting with a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb not found: ID does not exist" containerID="a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.594203 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb"} err="failed to get container status \"a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb\": rpc error: code = NotFound desc = could not find container \"a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb\": container with ID starting with a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb not found: ID does not exist" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.594227 4861 scope.go:117] "RemoveContainer" containerID="68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a" Jan 29 06:57:06 crc kubenswrapper[4861]: E0129 06:57:06.611967 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a\": container with ID starting with 68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a not found: ID does not exist" containerID="68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.612047 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a"} err="failed to get container status \"68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a\": rpc error: code = NotFound desc = could not find container \"68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a\": container with ID starting with 68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a not found: ID does not exist" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.612109 4861 scope.go:117] "RemoveContainer" containerID="a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.619211 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.619569 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb"} err="failed to get container status \"a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb\": rpc error: code = NotFound desc = could not find container \"a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb\": container with ID starting with a0ba14b30eb55f380fae957a440c81c9eeabc76a5a5e9aa3d5530c1f272f70cb not found: ID does not exist" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.619620 4861 scope.go:117] 
"RemoveContainer" containerID="68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.620070 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a"} err="failed to get container status \"68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a\": rpc error: code = NotFound desc = could not find container \"68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a\": container with ID starting with 68e2bbf7e6a0699db0a49e8e275bb957957ca99b2da30f18c573453a2570c95a not found: ID does not exist" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.620097 4861 scope.go:117] "RemoveContainer" containerID="5fe9c53e0fcd2cd94a63cb0cd318a1c2b1d8f53237727d05a34a0f21c8837e14" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.640176 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-b5m5w"] Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.650127 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-b5m5w"] Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.653939 4861 scope.go:117] "RemoveContainer" containerID="be60875a2de6cb8b593b08067130af18fa891776e7648f570ca9ab20a47f98b3" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.675179 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"c749a121-7e8e-4d49-8a30-c27fa21926b5\") " pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.675237 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhvz8\" (UniqueName: \"kubernetes.io/projected/c749a121-7e8e-4d49-8a30-c27fa21926b5-kube-api-access-rhvz8\") pod \"nova-cell1-conductor-0\" (UID: \"c749a121-7e8e-4d49-8a30-c27fa21926b5\") " pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.675273 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"c749a121-7e8e-4d49-8a30-c27fa21926b5\") " pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.675324 4861 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e999579-6c0a-44cc-b329-7dbabc461fe4-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.777028 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"c749a121-7e8e-4d49-8a30-c27fa21926b5\") " pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.777097 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhvz8\" (UniqueName: \"kubernetes.io/projected/c749a121-7e8e-4d49-8a30-c27fa21926b5-kube-api-access-rhvz8\") pod \"nova-cell1-conductor-0\" (UID: 
\"c749a121-7e8e-4d49-8a30-c27fa21926b5\") " pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.777143 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"c749a121-7e8e-4d49-8a30-c27fa21926b5\") " pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.786114 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"c749a121-7e8e-4d49-8a30-c27fa21926b5\") " pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.786656 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"c749a121-7e8e-4d49-8a30-c27fa21926b5\") " pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.801777 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhvz8\" (UniqueName: \"kubernetes.io/projected/c749a121-7e8e-4d49-8a30-c27fa21926b5-kube-api-access-rhvz8\") pod \"nova-cell1-conductor-0\" (UID: \"c749a121-7e8e-4d49-8a30-c27fa21926b5\") " pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.887871 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.904442 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.932305 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.941394 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.946567 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.953429 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.953642 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 29 06:57:06 crc kubenswrapper[4861]: I0129 06:57:06.961736 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.083630 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.084118 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d8957c3-5da1-489d-8e12-f81810d94ada-logs\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.084207 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-config-data\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.085405 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.085463 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsm2n\" (UniqueName: \"kubernetes.io/projected/9d8957c3-5da1-489d-8e12-f81810d94ada-kube-api-access-gsm2n\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.128720 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e999579-6c0a-44cc-b329-7dbabc461fe4" path="/var/lib/kubelet/pods/0e999579-6c0a-44cc-b329-7dbabc461fe4/volumes" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.129543 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e650b78-7110-40bc-b59e-fdaea7ecc619" path="/var/lib/kubelet/pods/4e650b78-7110-40bc-b59e-fdaea7ecc619/volumes" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.187732 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d8957c3-5da1-489d-8e12-f81810d94ada-logs\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.187867 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-config-data\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.187963 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.188001 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsm2n\" (UniqueName: \"kubernetes.io/projected/9d8957c3-5da1-489d-8e12-f81810d94ada-kube-api-access-gsm2n\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.188159 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.188207 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d8957c3-5da1-489d-8e12-f81810d94ada-logs\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.194648 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.204862 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.205382 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-config-data\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.206035 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsm2n\" (UniqueName: \"kubernetes.io/projected/9d8957c3-5da1-489d-8e12-f81810d94ada-kube-api-access-gsm2n\") pod \"nova-metadata-0\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.327870 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.381508 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 06:57:07 crc kubenswrapper[4861]: W0129 06:57:07.387457 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc749a121_7e8e_4d49_8a30_c27fa21926b5.slice/crio-7d9daaa12f60f919b4c72f557e4f3e4503ab8bdf76430f65c47a6e1becb7f4a7 WatchSource:0}: Error finding container 7d9daaa12f60f919b4c72f557e4f3e4503ab8bdf76430f65c47a6e1becb7f4a7: Status 404 returned error can't find the container with id 7d9daaa12f60f919b4c72f557e4f3e4503ab8bdf76430f65c47a6e1becb7f4a7 Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.536861 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"c749a121-7e8e-4d49-8a30-c27fa21926b5","Type":"ContainerStarted","Data":"7d9daaa12f60f919b4c72f557e4f3e4503ab8bdf76430f65c47a6e1becb7f4a7"} Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.537020 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="196bdecd-ee6f-4dc0-9d5b-a591ce688792" containerName="nova-scheduler-scheduler" containerID="cri-o://8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1" gracePeriod=30 Jan 29 06:57:07 crc kubenswrapper[4861]: I0129 06:57:07.811477 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:08 crc kubenswrapper[4861]: I0129 06:57:08.550371 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"c749a121-7e8e-4d49-8a30-c27fa21926b5","Type":"ContainerStarted","Data":"928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3"} Jan 29 06:57:08 crc kubenswrapper[4861]: I0129 06:57:08.550789 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:08 crc kubenswrapper[4861]: I0129 06:57:08.554972 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9d8957c3-5da1-489d-8e12-f81810d94ada","Type":"ContainerStarted","Data":"400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c"} Jan 29 06:57:08 crc kubenswrapper[4861]: I0129 06:57:08.555012 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9d8957c3-5da1-489d-8e12-f81810d94ada","Type":"ContainerStarted","Data":"36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc"} Jan 29 06:57:08 crc kubenswrapper[4861]: I0129 06:57:08.555027 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9d8957c3-5da1-489d-8e12-f81810d94ada","Type":"ContainerStarted","Data":"2ec6893d4cc59eeb115973204fa34fae6ee99fe4abffae20736e856f3887bbf9"} Jan 29 06:57:08 crc kubenswrapper[4861]: I0129 06:57:08.578289 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.578269521 podStartE2EDuration="2.578269521s" podCreationTimestamp="2026-01-29 06:57:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:08.569118653 +0000 UTC m=+1320.240613240" watchObservedRunningTime="2026-01-29 06:57:08.578269521 +0000 UTC m=+1320.249764088" Jan 29 06:57:08 crc 
kubenswrapper[4861]: I0129 06:57:08.612053 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.612032467 podStartE2EDuration="2.612032467s" podCreationTimestamp="2026-01-29 06:57:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:08.602187612 +0000 UTC m=+1320.273682189" watchObservedRunningTime="2026-01-29 06:57:08.612032467 +0000 UTC m=+1320.283527024" Jan 29 06:57:10 crc kubenswrapper[4861]: E0129 06:57:10.003763 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 06:57:10 crc kubenswrapper[4861]: E0129 06:57:10.005561 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 06:57:10 crc kubenswrapper[4861]: E0129 06:57:10.006823 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 06:57:10 crc kubenswrapper[4861]: E0129 06:57:10.006854 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="196bdecd-ee6f-4dc0-9d5b-a591ce688792" containerName="nova-scheduler-scheduler" Jan 29 06:57:11 crc kubenswrapper[4861]: W0129 06:57:11.180464 4861 container.go:586] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1079014_dbe9_4626_9fab_cde23e57636e.slice/crio-0015da60c82109c244eb9867f3dda46b4585f8b13483f37fb11b8de6ba695638": error while statting cgroup v2: [openat2 /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1079014_dbe9_4626_9fab_cde23e57636e.slice/crio-0015da60c82109c244eb9867f3dda46b4585f8b13483f37fb11b8de6ba695638/pids.max: no such file or directory], continuing to push stats Jan 29 06:57:11 crc kubenswrapper[4861]: E0129 06:57:11.297819 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1079014_dbe9_4626_9fab_cde23e57636e.slice/crio-conmon-bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a.scope\": RecentStats: unable to find data in memory cache]" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.439390 4861 util.go:48] "No ready sandbox for pod can be found. 
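The three ExecSync failures above come from nova-scheduler's readiness probe racing the container's shutdown: the runtime refuses to register a new exec PID in a stopping container, so the probe errors rather than fails. The probed command is recorded verbatim in the log; a sketch of the corresponding probe definition, assuming k8s.io/api/core/v1 (timing fields are assumptions):

    package example

    import corev1 "k8s.io/api/core/v1"

    // novaSchedulerReadinessProbe reconstructs the exec probe behind the
    // ExecSync errors above. pgrep -r DRST matches nova-scheduler
    // processes whose run state is one of D, R, S or T.
    func novaSchedulerReadinessProbe() *corev1.Probe {
        return &corev1.Probe{
            ProbeHandler: corev1.ProbeHandler{
                Exec: &corev1.ExecAction{
                    Command: []string{"/usr/bin/pgrep", "-r", "DRST", "nova-scheduler"},
                },
            },
            PeriodSeconds: 10, // assumption
        }
    }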
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.568477 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njw4f\" (UniqueName: \"kubernetes.io/projected/196bdecd-ee6f-4dc0-9d5b-a591ce688792-kube-api-access-njw4f\") pod \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.568605 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-combined-ca-bundle\") pod \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.568668 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-config-data\") pod \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\" (UID: \"196bdecd-ee6f-4dc0-9d5b-a591ce688792\") " Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.575310 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/196bdecd-ee6f-4dc0-9d5b-a591ce688792-kube-api-access-njw4f" (OuterVolumeSpecName: "kube-api-access-njw4f") pod "196bdecd-ee6f-4dc0-9d5b-a591ce688792" (UID: "196bdecd-ee6f-4dc0-9d5b-a591ce688792"). InnerVolumeSpecName "kube-api-access-njw4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.584762 4861 generic.go:334] "Generic (PLEG): container finished" podID="196bdecd-ee6f-4dc0-9d5b-a591ce688792" containerID="8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1" exitCode=0 Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.584870 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"196bdecd-ee6f-4dc0-9d5b-a591ce688792","Type":"ContainerDied","Data":"8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1"} Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.584938 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.585178 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"196bdecd-ee6f-4dc0-9d5b-a591ce688792","Type":"ContainerDied","Data":"5e5cb260be37368e8ba67082f462a7334a0ce56af960a9bcc5ea40ce4787c0fb"} Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.585205 4861 scope.go:117] "RemoveContainer" containerID="8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.592277 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.594569 4861 generic.go:334] "Generic (PLEG): container finished" podID="d1079014-dbe9-4626-9fab-cde23e57636e" containerID="bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a" exitCode=0 Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.594633 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d1079014-dbe9-4626-9fab-cde23e57636e","Type":"ContainerDied","Data":"bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a"} Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.594671 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d1079014-dbe9-4626-9fab-cde23e57636e","Type":"ContainerDied","Data":"0015da60c82109c244eb9867f3dda46b4585f8b13483f37fb11b8de6ba695638"} Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.606410 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-config-data" (OuterVolumeSpecName: "config-data") pod "196bdecd-ee6f-4dc0-9d5b-a591ce688792" (UID: "196bdecd-ee6f-4dc0-9d5b-a591ce688792"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.611906 4861 scope.go:117] "RemoveContainer" containerID="8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1" Jan 29 06:57:11 crc kubenswrapper[4861]: E0129 06:57:11.613952 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1\": container with ID starting with 8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1 not found: ID does not exist" containerID="8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.613999 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1"} err="failed to get container status \"8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1\": rpc error: code = NotFound desc = could not find container \"8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1\": container with ID starting with 8e2bf8a2ea5a280ebc7aa7cb119a84721145fef7d65502053ac924fc188b7fa1 not found: ID does not exist" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.614031 4861 scope.go:117] "RemoveContainer" containerID="bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.619695 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "196bdecd-ee6f-4dc0-9d5b-a591ce688792" (UID: "196bdecd-ee6f-4dc0-9d5b-a591ce688792"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.649448 4861 scope.go:117] "RemoveContainer" containerID="5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.670961 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1079014-dbe9-4626-9fab-cde23e57636e-logs\") pod \"d1079014-dbe9-4626-9fab-cde23e57636e\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.671147 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-config-data\") pod \"d1079014-dbe9-4626-9fab-cde23e57636e\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.671179 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-combined-ca-bundle\") pod \"d1079014-dbe9-4626-9fab-cde23e57636e\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.671262 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fg6jt\" (UniqueName: \"kubernetes.io/projected/d1079014-dbe9-4626-9fab-cde23e57636e-kube-api-access-fg6jt\") pod \"d1079014-dbe9-4626-9fab-cde23e57636e\" (UID: \"d1079014-dbe9-4626-9fab-cde23e57636e\") " Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.671627 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1079014-dbe9-4626-9fab-cde23e57636e-logs" (OuterVolumeSpecName: "logs") pod "d1079014-dbe9-4626-9fab-cde23e57636e" (UID: "d1079014-dbe9-4626-9fab-cde23e57636e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.671864 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1079014-dbe9-4626-9fab-cde23e57636e-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.671885 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njw4f\" (UniqueName: \"kubernetes.io/projected/196bdecd-ee6f-4dc0-9d5b-a591ce688792-kube-api-access-njw4f\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.671896 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.671908 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/196bdecd-ee6f-4dc0-9d5b-a591ce688792-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.673719 4861 scope.go:117] "RemoveContainer" containerID="bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a" Jan 29 06:57:11 crc kubenswrapper[4861]: E0129 06:57:11.674201 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a\": container with ID starting with bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a not found: ID does not exist" containerID="bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.674231 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a"} err="failed to get container status \"bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a\": rpc error: code = NotFound desc = could not find container \"bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a\": container with ID starting with bf26ac6a1fc3041a95c3107f854f5805db9f2d1e8a526dba8fa78304f609777a not found: ID does not exist" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.674252 4861 scope.go:117] "RemoveContainer" containerID="5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7" Jan 29 06:57:11 crc kubenswrapper[4861]: E0129 06:57:11.674726 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7\": container with ID starting with 5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7 not found: ID does not exist" containerID="5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.674752 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7"} err="failed to get container status \"5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7\": rpc error: code = NotFound desc = could not find container \"5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7\": container with ID starting with 
5963d1724b8a0b8cc593fb8d2025b570fa30371fd71c09e546d0ba717b34ffd7 not found: ID does not exist" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.675162 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1079014-dbe9-4626-9fab-cde23e57636e-kube-api-access-fg6jt" (OuterVolumeSpecName: "kube-api-access-fg6jt") pod "d1079014-dbe9-4626-9fab-cde23e57636e" (UID: "d1079014-dbe9-4626-9fab-cde23e57636e"). InnerVolumeSpecName "kube-api-access-fg6jt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.695173 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1079014-dbe9-4626-9fab-cde23e57636e" (UID: "d1079014-dbe9-4626-9fab-cde23e57636e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.697687 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-config-data" (OuterVolumeSpecName: "config-data") pod "d1079014-dbe9-4626-9fab-cde23e57636e" (UID: "d1079014-dbe9-4626-9fab-cde23e57636e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.774330 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.774367 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1079014-dbe9-4626-9fab-cde23e57636e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.774380 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fg6jt\" (UniqueName: \"kubernetes.io/projected/d1079014-dbe9-4626-9fab-cde23e57636e-kube-api-access-fg6jt\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.918354 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.932529 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.951768 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:57:11 crc kubenswrapper[4861]: E0129 06:57:11.952267 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1079014-dbe9-4626-9fab-cde23e57636e" containerName="nova-api-api" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.952293 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1079014-dbe9-4626-9fab-cde23e57636e" containerName="nova-api-api" Jan 29 06:57:11 crc kubenswrapper[4861]: E0129 06:57:11.952320 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1079014-dbe9-4626-9fab-cde23e57636e" containerName="nova-api-log" Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.952329 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1079014-dbe9-4626-9fab-cde23e57636e" containerName="nova-api-log" Jan 29 06:57:11 crc kubenswrapper[4861]: E0129 06:57:11.952341 4861 
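The paired "ContainerStatus from runtime service failed" / "DeleteContainer returned error" entries above are a benign race, not a real failure: the kubelet re-requests removal of containers CRI-O has already garbage-collected, and a NotFound reply is treated as "already done". A minimal sketch of that idempotent-cleanup pattern, assuming a hypothetical fakeRuntime stub and errNotFound sentinel rather than the kubelet's actual CRI types:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the gRPC NotFound status the CRI returns.
var errNotFound = errors.New("container not found")

// fakeRuntime is a stub of the single CRI call used in this sketch.
type fakeRuntime struct{ removed map[string]bool }

func (r *fakeRuntime) RemoveContainer(id string) error {
	if r.removed[id] {
		return fmt.Errorf("could not find container %q: %w", id, errNotFound)
	}
	r.removed[id] = true
	return nil
}

// removeIfPresent treats "already gone" as success, so repeated cleanup
// passes over the same pod converge instead of erroring out.
func removeIfPresent(rt *fakeRuntime, id string) error {
	err := rt.RemoveContainer(id)
	if err == nil || errors.Is(err, errNotFound) {
		return nil
	}
	return err
}

func main() {
	rt := &fakeRuntime{removed: map[string]bool{}}
	for i := 0; i < 2; i++ { // the second pass exercises the NotFound branch
		if err := removeIfPresent(rt, "5963d1724b8a"); err != nil {
			fmt.Println("cleanup failed:", err)
		}
	}
	fmt.Println("cleanup converged")
}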
Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.952349 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="196bdecd-ee6f-4dc0-9d5b-a591ce688792" containerName="nova-scheduler-scheduler"
Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.952585 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1079014-dbe9-4626-9fab-cde23e57636e" containerName="nova-api-log"
Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.952610 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1079014-dbe9-4626-9fab-cde23e57636e" containerName="nova-api-api"
Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.952629 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="196bdecd-ee6f-4dc0-9d5b-a591ce688792" containerName="nova-scheduler-scheduler"
Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.953700 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.956414 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Jan 29 06:57:11 crc kubenswrapper[4861]: I0129 06:57:11.961556 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.104909 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " pod="openstack/nova-scheduler-0"
Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.104971 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-config-data\") pod \"nova-scheduler-0\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " pod="openstack/nova-scheduler-0"
Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.105095 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gg7p7\" (UniqueName: \"kubernetes.io/projected/baec84f8-0437-41da-a5f5-7b88894605eb-kube-api-access-gg7p7\") pod \"nova-scheduler-0\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " pod="openstack/nova-scheduler-0"
Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.207003 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " pod="openstack/nova-scheduler-0"
Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.207094 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-config-data\") pod \"nova-scheduler-0\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " pod="openstack/nova-scheduler-0"
Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.207891 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gg7p7\" (UniqueName: \"kubernetes.io/projected/baec84f8-0437-41da-a5f5-7b88894605eb-kube-api-access-gg7p7\") pod \"nova-scheduler-0\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " pod="openstack/nova-scheduler-0"
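The RemoveStaleState / "Deleted CPUSet assignment" entries show the CPU and memory managers dropping per-container resource claims left behind by the just-deleted pods before the replacement nova-scheduler-0 is admitted; otherwise the new containers could be pinned onto CPUs or memory the dead pod still nominally owns. A sketch of that sweep over assumed, simplified state (real assignments are CPU sets and memory blocks, not strings):

package main

import "fmt"

// assignments maps podUID -> containerName -> an opaque resource claim.
type assignments map[string]map[string]string

// removeStaleState drops every claim whose pod is no longer active,
// mirroring the sweep the log shows at pod-admission time.
func removeStaleState(state assignments, active map[string]bool) {
	for podUID, containers := range state {
		if active[podUID] {
			continue
		}
		for name := range containers {
			fmt.Printf("removing stale assignment podUID=%s container=%s\n", podUID, name)
		}
		delete(state, podUID)
	}
}

func main() {
	state := assignments{
		"d1079014-dbe9-4626-9fab-cde23e57636e": {
			"nova-api-api": "cpus 0-1",
			"nova-api-log": "cpus 2",
		},
	}
	removeStaleState(state, map[string]bool{"baec84f8-0437-41da-a5f5-7b88894605eb": true})
	fmt.Println("pods with live assignments:", len(state))
}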
\"kubernetes.io/projected/baec84f8-0437-41da-a5f5-7b88894605eb-kube-api-access-gg7p7\") pod \"nova-scheduler-0\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.210465 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-config-data\") pod \"nova-scheduler-0\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.217176 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.231363 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gg7p7\" (UniqueName: \"kubernetes.io/projected/baec84f8-0437-41da-a5f5-7b88894605eb-kube-api-access-gg7p7\") pod \"nova-scheduler-0\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.276186 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.329483 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.329684 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.608146 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.642775 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.655353 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.672286 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.674235 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.677981 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.682575 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.751767 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:57:12 crc kubenswrapper[4861]: W0129 06:57:12.756856 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbaec84f8_0437_41da_a5f5_7b88894605eb.slice/crio-6ac861e78068334500e10f56df1418c49120feb0a093737cfded2a91c7aa0eb4 WatchSource:0}: Error finding container 6ac861e78068334500e10f56df1418c49120feb0a093737cfded2a91c7aa0eb4: Status 404 returned error can't find the container with id 6ac861e78068334500e10f56df1418c49120feb0a093737cfded2a91c7aa0eb4 Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.822890 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chpmr\" (UniqueName: \"kubernetes.io/projected/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-kube-api-access-chpmr\") pod \"nova-api-0\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.822938 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.822972 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-logs\") pod \"nova-api-0\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.823299 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-config-data\") pod \"nova-api-0\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.924712 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-config-data\") pod \"nova-api-0\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.925018 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chpmr\" (UniqueName: \"kubernetes.io/projected/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-kube-api-access-chpmr\") pod \"nova-api-0\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.925061 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.925103 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-logs\") pod \"nova-api-0\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.925435 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-logs\") pod \"nova-api-0\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.930655 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.932982 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-config-data\") pod \"nova-api-0\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:12 crc kubenswrapper[4861]: I0129 06:57:12.940974 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chpmr\" (UniqueName: \"kubernetes.io/projected/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-kube-api-access-chpmr\") pod \"nova-api-0\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " pod="openstack/nova-api-0" Jan 29 06:57:13 crc kubenswrapper[4861]: I0129 06:57:13.003919 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:57:13 crc kubenswrapper[4861]: I0129 06:57:13.140508 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="196bdecd-ee6f-4dc0-9d5b-a591ce688792" path="/var/lib/kubelet/pods/196bdecd-ee6f-4dc0-9d5b-a591ce688792/volumes" Jan 29 06:57:13 crc kubenswrapper[4861]: I0129 06:57:13.141978 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1079014-dbe9-4626-9fab-cde23e57636e" path="/var/lib/kubelet/pods/d1079014-dbe9-4626-9fab-cde23e57636e/volumes" Jan 29 06:57:13 crc kubenswrapper[4861]: W0129 06:57:13.484429 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7b5789b_e55e_4ac5_9f7e_c1881c760a28.slice/crio-3095210f38fa185dc7ee7ad02efc50fbcc2e20b7c1efe22bb4f07aea183ccf4a WatchSource:0}: Error finding container 3095210f38fa185dc7ee7ad02efc50fbcc2e20b7c1efe22bb4f07aea183ccf4a: Status 404 returned error can't find the container with id 3095210f38fa185dc7ee7ad02efc50fbcc2e20b7c1efe22bb4f07aea183ccf4a Jan 29 06:57:13 crc kubenswrapper[4861]: I0129 06:57:13.485747 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:57:13 crc kubenswrapper[4861]: I0129 06:57:13.624629 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"baec84f8-0437-41da-a5f5-7b88894605eb","Type":"ContainerStarted","Data":"62a58805c7d853890393b6f9dcd912e589adc6cfb153bd0108fd519065fee75c"} Jan 29 06:57:13 crc kubenswrapper[4861]: I0129 06:57:13.624673 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"baec84f8-0437-41da-a5f5-7b88894605eb","Type":"ContainerStarted","Data":"6ac861e78068334500e10f56df1418c49120feb0a093737cfded2a91c7aa0eb4"} Jan 29 06:57:13 crc kubenswrapper[4861]: I0129 06:57:13.626719 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b7b5789b-e55e-4ac5-9f7e-c1881c760a28","Type":"ContainerStarted","Data":"3095210f38fa185dc7ee7ad02efc50fbcc2e20b7c1efe22bb4f07aea183ccf4a"} Jan 29 06:57:13 crc kubenswrapper[4861]: I0129 06:57:13.648062 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.648037456 podStartE2EDuration="2.648037456s" podCreationTimestamp="2026-01-29 06:57:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:13.646622499 +0000 UTC m=+1325.318117106" watchObservedRunningTime="2026-01-29 06:57:13.648037456 +0000 UTC m=+1325.319532033" Jan 29 06:57:14 crc kubenswrapper[4861]: I0129 06:57:14.640283 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b7b5789b-e55e-4ac5-9f7e-c1881c760a28","Type":"ContainerStarted","Data":"8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977"} Jan 29 06:57:14 crc kubenswrapper[4861]: I0129 06:57:14.640877 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b7b5789b-e55e-4ac5-9f7e-c1881c760a28","Type":"ContainerStarted","Data":"39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf"} Jan 29 06:57:14 crc kubenswrapper[4861]: I0129 06:57:14.714898 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.714876817 podStartE2EDuration="2.714876817s" 
podCreationTimestamp="2026-01-29 06:57:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:14.709747223 +0000 UTC m=+1326.381241790" watchObservedRunningTime="2026-01-29 06:57:14.714876817 +0000 UTC m=+1326.386371384" Jan 29 06:57:16 crc kubenswrapper[4861]: I0129 06:57:16.936028 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 29 06:57:17 crc kubenswrapper[4861]: I0129 06:57:17.276453 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 06:57:17 crc kubenswrapper[4861]: I0129 06:57:17.328985 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 06:57:17 crc kubenswrapper[4861]: I0129 06:57:17.329099 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 06:57:18 crc kubenswrapper[4861]: I0129 06:57:18.345322 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 06:57:18 crc kubenswrapper[4861]: I0129 06:57:18.345317 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 06:57:18 crc kubenswrapper[4861]: I0129 06:57:18.628796 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 29 06:57:22 crc kubenswrapper[4861]: I0129 06:57:22.276599 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 06:57:22 crc kubenswrapper[4861]: I0129 06:57:22.303527 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 06:57:22 crc kubenswrapper[4861]: I0129 06:57:22.761539 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 06:57:23 crc kubenswrapper[4861]: I0129 06:57:23.005055 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 06:57:23 crc kubenswrapper[4861]: I0129 06:57:23.005180 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 06:57:24 crc kubenswrapper[4861]: I0129 06:57:24.088526 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.202:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 06:57:24 crc kubenswrapper[4861]: I0129 06:57:24.088929 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.202:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 06:57:27 crc kubenswrapper[4861]: I0129 06:57:27.335610 
Jan 29 06:57:27 crc kubenswrapper[4861]: I0129 06:57:27.344947 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 29 06:57:27 crc kubenswrapper[4861]: I0129 06:57:27.345240 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 29 06:57:27 crc kubenswrapper[4861]: I0129 06:57:27.810650 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.629765 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.630301 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.630344 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.631062 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"85000e70e0f61206c55ed7e3495b90975c6a190d05beb488bbd436b08d076e87"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.631149 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://85000e70e0f61206c55ed7e3495b90975c6a190d05beb488bbd436b08d076e87" gracePeriod=600
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.819013 4861 generic.go:334] "Generic (PLEG): container finished" podID="b7d2bf83-0528-4d71-9700-73548000f02c" containerID="2578f711dc2641e8fba1a7313e4c5059d379b749e689eba9c1b05fac506fd409" exitCode=137
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.819114 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b7d2bf83-0528-4d71-9700-73548000f02c","Type":"ContainerDied","Data":"2578f711dc2641e8fba1a7313e4c5059d379b749e689eba9c1b05fac506fd409"}
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.819146 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b7d2bf83-0528-4d71-9700-73548000f02c","Type":"ContainerDied","Data":"e4824901064fa4cddd04d94cd1fd7a98ce896dd96ad636ab50a1687b9e290630"}
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.819160 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4824901064fa4cddd04d94cd1fd7a98ce896dd96ad636ab50a1687b9e290630"
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.821708 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="85000e70e0f61206c55ed7e3495b90975c6a190d05beb488bbd436b08d076e87" exitCode=0
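For machine-config-daemon this is the liveness contract end to end: a failed probe, "failed liveness probe, will be restarted", a kill with gracePeriod=600 (presumably the pod's terminationGracePeriodSeconds), then ContainerDied with exitCode=0, meaning the process shut down within the grace window, whereas the novncproxy container's exitCode=137 (128+9) marks a SIGKILL. A sketch of the consecutive-failure accounting behind the restart decision, with an assumed failureThreshold of 3:

package main

import "fmt"

// probeTracker counts consecutive liveness failures; crossing the
// threshold triggers a kill-and-restart with the pod's grace period.
type probeTracker struct {
	failures  int
	threshold int
}

func (t *probeTracker) observe(ok bool) (restart bool) {
	if ok {
		t.failures = 0
		return false
	}
	t.failures++
	return t.failures >= t.threshold
}

func main() {
	const gracePeriodSeconds = 600 // assumed to come from the pod spec
	t := &probeTracker{threshold: 3}
	for i, ok := range []bool{true, false, false, false} {
		if t.observe(ok) {
			fmt.Printf("probe %d failed: killing container with gracePeriod=%ds, restart follows\n", i, gracePeriodSeconds)
		}
	}
}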
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.821737 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"85000e70e0f61206c55ed7e3495b90975c6a190d05beb488bbd436b08d076e87"}
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.821763 4861 scope.go:117] "RemoveContainer" containerID="6286a50c0abd3320b1618a8f91d0446eb73b0dae9310f72e53e305c4914d0508"
Jan 29 06:57:30 crc kubenswrapper[4861]: I0129 06:57:30.902270 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.044271 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzprp\" (UniqueName: \"kubernetes.io/projected/b7d2bf83-0528-4d71-9700-73548000f02c-kube-api-access-dzprp\") pod \"b7d2bf83-0528-4d71-9700-73548000f02c\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") "
Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.044349 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-config-data\") pod \"b7d2bf83-0528-4d71-9700-73548000f02c\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") "
Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.044427 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-combined-ca-bundle\") pod \"b7d2bf83-0528-4d71-9700-73548000f02c\" (UID: \"b7d2bf83-0528-4d71-9700-73548000f02c\") "
Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.050005 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7d2bf83-0528-4d71-9700-73548000f02c-kube-api-access-dzprp" (OuterVolumeSpecName: "kube-api-access-dzprp") pod "b7d2bf83-0528-4d71-9700-73548000f02c" (UID: "b7d2bf83-0528-4d71-9700-73548000f02c"). InnerVolumeSpecName "kube-api-access-dzprp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.070489 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-config-data" (OuterVolumeSpecName: "config-data") pod "b7d2bf83-0528-4d71-9700-73548000f02c" (UID: "b7d2bf83-0528-4d71-9700-73548000f02c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.070847 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b7d2bf83-0528-4d71-9700-73548000f02c" (UID: "b7d2bf83-0528-4d71-9700-73548000f02c"). InnerVolumeSpecName "combined-ca-bundle".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.146518 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzprp\" (UniqueName: \"kubernetes.io/projected/b7d2bf83-0528-4d71-9700-73548000f02c-kube-api-access-dzprp\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.146545 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.146554 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7d2bf83-0528-4d71-9700-73548000f02c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.834821 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.834833 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a"} Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.886289 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.902353 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.910814 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:57:31 crc kubenswrapper[4861]: E0129 06:57:31.911442 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7d2bf83-0528-4d71-9700-73548000f02c" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.911471 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7d2bf83-0528-4d71-9700-73548000f02c" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.911815 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7d2bf83-0528-4d71-9700-73548000f02c" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.912823 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.920783 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.949937 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.954323 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 29 06:57:31 crc kubenswrapper[4861]: I0129 06:57:31.954623 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.079242 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.079593 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.079621 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2dg4\" (UniqueName: \"kubernetes.io/projected/c4c159fd-7714-4351-8258-437e67ff5dbc-kube-api-access-p2dg4\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.079658 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.079790 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.181353 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.181503 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " 
pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.181577 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.181601 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2dg4\" (UniqueName: \"kubernetes.io/projected/c4c159fd-7714-4351-8258-437e67ff5dbc-kube-api-access-p2dg4\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.181633 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.187452 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.188304 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.193503 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.197593 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.214669 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2dg4\" (UniqueName: \"kubernetes.io/projected/c4c159fd-7714-4351-8258-437e67ff5dbc-kube-api-access-p2dg4\") pod \"nova-cell1-novncproxy-0\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.292646 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:32 crc kubenswrapper[4861]: I0129 06:57:32.845288 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:57:32 crc kubenswrapper[4861]: W0129 06:57:32.855295 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4c159fd_7714_4351_8258_437e67ff5dbc.slice/crio-e96e540b995c3e31ec41237d45d3ac474c5a0652535800c89d9c23913516e2c2 WatchSource:0}: Error finding container e96e540b995c3e31ec41237d45d3ac474c5a0652535800c89d9c23913516e2c2: Status 404 returned error can't find the container with id e96e540b995c3e31ec41237d45d3ac474c5a0652535800c89d9c23913516e2c2 Jan 29 06:57:33 crc kubenswrapper[4861]: I0129 06:57:33.012776 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 06:57:33 crc kubenswrapper[4861]: I0129 06:57:33.013291 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 06:57:33 crc kubenswrapper[4861]: I0129 06:57:33.013565 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 06:57:33 crc kubenswrapper[4861]: I0129 06:57:33.021964 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 06:57:33 crc kubenswrapper[4861]: I0129 06:57:33.133369 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7d2bf83-0528-4d71-9700-73548000f02c" path="/var/lib/kubelet/pods/b7d2bf83-0528-4d71-9700-73548000f02c/volumes" Jan 29 06:57:33 crc kubenswrapper[4861]: I0129 06:57:33.862674 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c4c159fd-7714-4351-8258-437e67ff5dbc","Type":"ContainerStarted","Data":"004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95"} Jan 29 06:57:33 crc kubenswrapper[4861]: I0129 06:57:33.862976 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 06:57:33 crc kubenswrapper[4861]: I0129 06:57:33.863022 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c4c159fd-7714-4351-8258-437e67ff5dbc","Type":"ContainerStarted","Data":"e96e540b995c3e31ec41237d45d3ac474c5a0652535800c89d9c23913516e2c2"} Jan 29 06:57:33 crc kubenswrapper[4861]: I0129 06:57:33.869096 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 06:57:33 crc kubenswrapper[4861]: I0129 06:57:33.918892 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.91887052 podStartE2EDuration="2.91887052s" podCreationTimestamp="2026-01-29 06:57:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:33.912029492 +0000 UTC m=+1345.583524069" watchObservedRunningTime="2026-01-29 06:57:33.91887052 +0000 UTC m=+1345.590365077" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.145939 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-ktt88"] Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.147344 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.164445 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-ktt88"] Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.323643 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-svc\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.323911 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-swift-storage-0\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.323952 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-sb\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.324000 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmqvx\" (UniqueName: \"kubernetes.io/projected/86dd900f-d608-496c-91b3-be95d914cf58-kube-api-access-lmqvx\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.324038 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-config\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.324125 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-nb\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.425842 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-svc\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.425909 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-swift-storage-0\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.425949 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-sb\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.426002 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmqvx\" (UniqueName: \"kubernetes.io/projected/86dd900f-d608-496c-91b3-be95d914cf58-kube-api-access-lmqvx\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.426055 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-config\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.426116 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-nb\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.426997 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-svc\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.427032 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-sb\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.427223 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-swift-storage-0\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.427462 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-nb\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.427974 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-config\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.444014 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmqvx\" (UniqueName: 
\"kubernetes.io/projected/86dd900f-d608-496c-91b3-be95d914cf58-kube-api-access-lmqvx\") pod \"dnsmasq-dns-fcd6f8f8f-ktt88\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.478293 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:34 crc kubenswrapper[4861]: I0129 06:57:34.947479 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-ktt88"] Jan 29 06:57:35 crc kubenswrapper[4861]: I0129 06:57:35.885012 4861 generic.go:334] "Generic (PLEG): container finished" podID="86dd900f-d608-496c-91b3-be95d914cf58" containerID="7f360a109c4ea9a81863b2378bb3244474ecea22c079e1bd431ea1270e44d30d" exitCode=0 Jan 29 06:57:35 crc kubenswrapper[4861]: I0129 06:57:35.885059 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" event={"ID":"86dd900f-d608-496c-91b3-be95d914cf58","Type":"ContainerDied","Data":"7f360a109c4ea9a81863b2378bb3244474ecea22c079e1bd431ea1270e44d30d"} Jan 29 06:57:35 crc kubenswrapper[4861]: I0129 06:57:35.885720 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" event={"ID":"86dd900f-d608-496c-91b3-be95d914cf58","Type":"ContainerStarted","Data":"3e3440ea939dd2d2b29feb8067c587d14bde6fe199c73836f73fcee94a17f3a4"} Jan 29 06:57:35 crc kubenswrapper[4861]: I0129 06:57:35.975383 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:57:35 crc kubenswrapper[4861]: I0129 06:57:35.975641 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="ceilometer-central-agent" containerID="cri-o://c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c" gracePeriod=30 Jan 29 06:57:35 crc kubenswrapper[4861]: I0129 06:57:35.976050 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="proxy-httpd" containerID="cri-o://918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a" gracePeriod=30 Jan 29 06:57:35 crc kubenswrapper[4861]: I0129 06:57:35.976114 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="sg-core" containerID="cri-o://ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb" gracePeriod=30 Jan 29 06:57:35 crc kubenswrapper[4861]: I0129 06:57:35.976149 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="ceilometer-notification-agent" containerID="cri-o://18f72f9d87c3567b7dfd90a93627a9d7abddd58b54226f615a15371e12048613" gracePeriod=30 Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.869616 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.898984 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" event={"ID":"86dd900f-d608-496c-91b3-be95d914cf58","Type":"ContainerStarted","Data":"a8543a52cd59c6b1f2aa0887f12875b0d96e617f0a1ae249e3c7261009db9ece"} Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.899944 4861 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.902405 4861 generic.go:334] "Generic (PLEG): container finished" podID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerID="918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a" exitCode=0 Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.902793 4861 generic.go:334] "Generic (PLEG): container finished" podID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerID="ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb" exitCode=2 Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.902811 4861 generic.go:334] "Generic (PLEG): container finished" podID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerID="c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c" exitCode=0 Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.903047 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerName="nova-api-log" containerID="cri-o://39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf" gracePeriod=30 Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.903265 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"740f35ca-86a8-45d8-86d2-4cbc1ca7e148","Type":"ContainerDied","Data":"918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a"} Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.903285 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerName="nova-api-api" containerID="cri-o://8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977" gracePeriod=30 Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.903324 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"740f35ca-86a8-45d8-86d2-4cbc1ca7e148","Type":"ContainerDied","Data":"ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb"} Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.903418 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"740f35ca-86a8-45d8-86d2-4cbc1ca7e148","Type":"ContainerDied","Data":"c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c"} Jan 29 06:57:36 crc kubenswrapper[4861]: I0129 06:57:36.930568 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" podStartSLOduration=2.930547073 podStartE2EDuration="2.930547073s" podCreationTimestamp="2026-01-29 06:57:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:36.927185505 +0000 UTC m=+1348.598680092" watchObservedRunningTime="2026-01-29 06:57:36.930547073 +0000 UTC m=+1348.602041630" Jan 29 06:57:37 crc kubenswrapper[4861]: I0129 06:57:37.293919 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:57:37 crc kubenswrapper[4861]: I0129 06:57:37.912472 4861 generic.go:334] "Generic (PLEG): container finished" podID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerID="39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf" exitCode=143 Jan 29 06:57:37 crc kubenswrapper[4861]: I0129 06:57:37.912986 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-0" event={"ID":"b7b5789b-e55e-4ac5-9f7e-c1881c760a28","Type":"ContainerDied","Data":"39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf"} Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.750879 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.916054 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-scripts\") pod \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.916136 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-sg-core-conf-yaml\") pod \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.916218 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ts7b\" (UniqueName: \"kubernetes.io/projected/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-kube-api-access-7ts7b\") pod \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.916284 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-config-data\") pod \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.916369 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-log-httpd\") pod \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.916464 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-ceilometer-tls-certs\") pod \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.916621 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-run-httpd\") pod \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.916682 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-combined-ca-bundle\") pod \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\" (UID: \"740f35ca-86a8-45d8-86d2-4cbc1ca7e148\") " Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.917503 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "740f35ca-86a8-45d8-86d2-4cbc1ca7e148" (UID: "740f35ca-86a8-45d8-86d2-4cbc1ca7e148"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.917728 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.918173 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "740f35ca-86a8-45d8-86d2-4cbc1ca7e148" (UID: "740f35ca-86a8-45d8-86d2-4cbc1ca7e148"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.928184 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-scripts" (OuterVolumeSpecName: "scripts") pod "740f35ca-86a8-45d8-86d2-4cbc1ca7e148" (UID: "740f35ca-86a8-45d8-86d2-4cbc1ca7e148"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.928247 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-kube-api-access-7ts7b" (OuterVolumeSpecName: "kube-api-access-7ts7b") pod "740f35ca-86a8-45d8-86d2-4cbc1ca7e148" (UID: "740f35ca-86a8-45d8-86d2-4cbc1ca7e148"). InnerVolumeSpecName "kube-api-access-7ts7b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.938145 4861 generic.go:334] "Generic (PLEG): container finished" podID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerID="18f72f9d87c3567b7dfd90a93627a9d7abddd58b54226f615a15371e12048613" exitCode=0 Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.938197 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"740f35ca-86a8-45d8-86d2-4cbc1ca7e148","Type":"ContainerDied","Data":"18f72f9d87c3567b7dfd90a93627a9d7abddd58b54226f615a15371e12048613"} Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.938229 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"740f35ca-86a8-45d8-86d2-4cbc1ca7e148","Type":"ContainerDied","Data":"398e930f0f9679b8ac4568171cffffbc2bb1ae8279ef5f7e7c2dcde6d6d6455e"} Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.938248 4861 scope.go:117] "RemoveContainer" containerID="918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a" Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.938394 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.956188 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "740f35ca-86a8-45d8-86d2-4cbc1ca7e148" (UID: "740f35ca-86a8-45d8-86d2-4cbc1ca7e148"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:38 crc kubenswrapper[4861]: I0129 06:57:38.981970 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "740f35ca-86a8-45d8-86d2-4cbc1ca7e148" (UID: "740f35ca-86a8-45d8-86d2-4cbc1ca7e148"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.005247 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "740f35ca-86a8-45d8-86d2-4cbc1ca7e148" (UID: "740f35ca-86a8-45d8-86d2-4cbc1ca7e148"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.019607 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.019894 4861 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.019904 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ts7b\" (UniqueName: \"kubernetes.io/projected/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-kube-api-access-7ts7b\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.019913 4861 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.019923 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.019930 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.027702 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-config-data" (OuterVolumeSpecName: "config-data") pod "740f35ca-86a8-45d8-86d2-4cbc1ca7e148" (UID: "740f35ca-86a8-45d8-86d2-4cbc1ca7e148"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.029993 4861 scope.go:117] "RemoveContainer" containerID="ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.047946 4861 scope.go:117] "RemoveContainer" containerID="18f72f9d87c3567b7dfd90a93627a9d7abddd58b54226f615a15371e12048613" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.068453 4861 scope.go:117] "RemoveContainer" containerID="c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.088344 4861 scope.go:117] "RemoveContainer" containerID="918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a" Jan 29 06:57:39 crc kubenswrapper[4861]: E0129 06:57:39.089407 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a\": container with ID starting with 918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a not found: ID does not exist" containerID="918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.089463 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a"} err="failed to get container status \"918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a\": rpc error: code = NotFound desc = could not find container \"918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a\": container with ID starting with 918163e637632e5e24d4881f6446b89e5e730ccade57ad30a0a95df5f5d3aa7a not found: ID does not exist" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.089596 4861 scope.go:117] "RemoveContainer" containerID="ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb" Jan 29 06:57:39 crc kubenswrapper[4861]: E0129 06:57:39.090003 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb\": container with ID starting with ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb not found: ID does not exist" containerID="ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.090047 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb"} err="failed to get container status \"ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb\": rpc error: code = NotFound desc = could not find container \"ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb\": container with ID starting with ed30ee141247d26e8b8ea2210d357c552de276072c756fc7b1f13928ffad9ffb not found: ID does not exist" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.090087 4861 scope.go:117] "RemoveContainer" containerID="18f72f9d87c3567b7dfd90a93627a9d7abddd58b54226f615a15371e12048613" Jan 29 06:57:39 crc kubenswrapper[4861]: E0129 06:57:39.090380 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18f72f9d87c3567b7dfd90a93627a9d7abddd58b54226f615a15371e12048613\": container with ID starting with 
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.090413 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18f72f9d87c3567b7dfd90a93627a9d7abddd58b54226f615a15371e12048613"} err="failed to get container status \"18f72f9d87c3567b7dfd90a93627a9d7abddd58b54226f615a15371e12048613\": rpc error: code = NotFound desc = could not find container \"18f72f9d87c3567b7dfd90a93627a9d7abddd58b54226f615a15371e12048613\": container with ID starting with 18f72f9d87c3567b7dfd90a93627a9d7abddd58b54226f615a15371e12048613 not found: ID does not exist"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.090434 4861 scope.go:117] "RemoveContainer" containerID="c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c"
Jan 29 06:57:39 crc kubenswrapper[4861]: E0129 06:57:39.090650 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c\": container with ID starting with c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c not found: ID does not exist" containerID="c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.090673 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c"} err="failed to get container status \"c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c\": rpc error: code = NotFound desc = could not find container \"c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c\": container with ID starting with c0e30e94d78d4b90bc65e012fa6d0d8f8c9d07946c93ca76b1ca75d3a80f916c not found: ID does not exist"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.122384 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/740f35ca-86a8-45d8-86d2-4cbc1ca7e148-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.259942 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.268593 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.280635 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:57:39 crc kubenswrapper[4861]: E0129 06:57:39.281086 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="ceilometer-notification-agent"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.281101 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="ceilometer-notification-agent"
Jan 29 06:57:39 crc kubenswrapper[4861]: E0129 06:57:39.281129 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="sg-core"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.281134 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="sg-core"
Jan 29 06:57:39 crc kubenswrapper[4861]: E0129 06:57:39.281143 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="proxy-httpd"
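Annotation: the RemoveStaleState entries from cpu_manager.go and memory_manager.go drop per-container resource assignments whose owning pod UID is no longer active, keyed by (podUID, containerName). A minimal sketch of that bookkeeping with invented types, not the kubelet's actual state structures:

```go
package main

import "fmt"

// key mirrors how the log identifies stale entries: podUID + containerName.
type key struct {
	podUID, container string
}

type staleStateCleaner struct {
	assignments map[key]string // e.g. a CPU set or memory block per container
}

// removeStaleState deletes every assignment owned by an inactive pod UID,
// echoing the "Deleted CPUSet assignment" lines above.
func (c *staleStateCleaner) removeStaleState(active map[string]bool) {
	for k := range c.assignments {
		if !active[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %q of pod %q\n", k.container, k.podUID)
			delete(c.assignments, k)
		}
	}
}

func main() {
	c := &staleStateCleaner{assignments: map[key]string{
		{"740f35ca", "sg-core"}:     "cpus 0-1",
		{"740f35ca", "proxy-httpd"}: "cpus 2-3",
	}}
	c.removeStaleState(map[string]bool{}) // no pods active: everything is stale
}
```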
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.281149 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="proxy-httpd"
Jan 29 06:57:39 crc kubenswrapper[4861]: E0129 06:57:39.281157 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="ceilometer-central-agent"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.281162 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="ceilometer-central-agent"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.281310 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="ceilometer-notification-agent"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.281328 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="proxy-httpd"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.281344 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="sg-core"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.281359 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" containerName="ceilometer-central-agent"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.283162 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.286559 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.286781 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.286798 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.293740 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.427937 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.427990 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-scripts\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.428055 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-config-data\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.428112 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb5b5\" (UniqueName: \"kubernetes.io/projected/dd2a9571-5619-4f88-b574-71e3ba931b08-kube-api-access-nb5b5\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.428145 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-run-httpd\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.428176 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.428201 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-log-httpd\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.428328 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.483736 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:57:39 crc kubenswrapper[4861]: E0129 06:57:39.484660 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceilometer-tls-certs combined-ca-bundle config-data kube-api-access-nb5b5 log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="dd2a9571-5619-4f88-b574-71e3ba931b08"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.529526 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.529628 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.529802 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-scripts\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.530271 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-config-data\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
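Annotation: the "Error syncing pod, skipping ... context canceled" entry records a race: the replacement ceilometer-0 pod (UID dd2a9571-...) was deleted through the API while its volumes were still being set up, so the sync's context was canceled and the remaining mounts were abandoned. A generic sketch of aborting in-flight setup work with context cancellation, assuming nothing about pod_workers internals:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// syncPod pretends to mount volumes until its context is canceled,
// mirroring how a pod deletion aborts an in-flight sync.
func syncPod(ctx context.Context, volumes []string) error {
	for _, v := range volumes {
		select {
		case <-ctx.Done():
			return fmt.Errorf("failed to process volumes=[%s]: %w", v, ctx.Err())
		case <-time.After(10 * time.Millisecond): // stand-in for real mount work
			fmt.Println("mounted", v)
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() { time.Sleep(15 * time.Millisecond); cancel() }() // pod deleted mid-sync
	err := syncPod(ctx, []string{"scripts", "config-data", "ceilometer-tls-certs"})
	if errors.Is(err, context.Canceled) {
		fmt.Println("Error syncing pod, skipping:", err)
	}
}
```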
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.530365 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb5b5\" (UniqueName: \"kubernetes.io/projected/dd2a9571-5619-4f88-b574-71e3ba931b08-kube-api-access-nb5b5\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.530716 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-run-httpd\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.530763 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.530788 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-log-httpd\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.531334 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-run-httpd\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.531398 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-log-httpd\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.533424 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-scripts\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.534401 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.534596 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0"
Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.534599 4861 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-config-data\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.535501 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.552039 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb5b5\" (UniqueName: \"kubernetes.io/projected/dd2a9571-5619-4f88-b574-71e3ba931b08-kube-api-access-nb5b5\") pod \"ceilometer-0\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " pod="openstack/ceilometer-0" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.954805 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:57:39 crc kubenswrapper[4861]: I0129 06:57:39.969048 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.142218 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-log-httpd\") pod \"dd2a9571-5619-4f88-b574-71e3ba931b08\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.142588 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "dd2a9571-5619-4f88-b574-71e3ba931b08" (UID: "dd2a9571-5619-4f88-b574-71e3ba931b08"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.142642 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nb5b5\" (UniqueName: \"kubernetes.io/projected/dd2a9571-5619-4f88-b574-71e3ba931b08-kube-api-access-nb5b5\") pod \"dd2a9571-5619-4f88-b574-71e3ba931b08\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.142764 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-sg-core-conf-yaml\") pod \"dd2a9571-5619-4f88-b574-71e3ba931b08\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.142832 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-config-data\") pod \"dd2a9571-5619-4f88-b574-71e3ba931b08\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.142868 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-run-httpd\") pod \"dd2a9571-5619-4f88-b574-71e3ba931b08\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.142935 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-scripts\") pod \"dd2a9571-5619-4f88-b574-71e3ba931b08\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.142961 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-ceilometer-tls-certs\") pod \"dd2a9571-5619-4f88-b574-71e3ba931b08\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.143147 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-combined-ca-bundle\") pod \"dd2a9571-5619-4f88-b574-71e3ba931b08\" (UID: \"dd2a9571-5619-4f88-b574-71e3ba931b08\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.143744 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.144258 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "dd2a9571-5619-4f88-b574-71e3ba931b08" (UID: "dd2a9571-5619-4f88-b574-71e3ba931b08"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.148762 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd2a9571-5619-4f88-b574-71e3ba931b08-kube-api-access-nb5b5" (OuterVolumeSpecName: "kube-api-access-nb5b5") pod "dd2a9571-5619-4f88-b574-71e3ba931b08" (UID: "dd2a9571-5619-4f88-b574-71e3ba931b08"). InnerVolumeSpecName "kube-api-access-nb5b5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.149281 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "dd2a9571-5619-4f88-b574-71e3ba931b08" (UID: "dd2a9571-5619-4f88-b574-71e3ba931b08"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.149527 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-config-data" (OuterVolumeSpecName: "config-data") pod "dd2a9571-5619-4f88-b574-71e3ba931b08" (UID: "dd2a9571-5619-4f88-b574-71e3ba931b08"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.150566 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "dd2a9571-5619-4f88-b574-71e3ba931b08" (UID: "dd2a9571-5619-4f88-b574-71e3ba931b08"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.151152 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-scripts" (OuterVolumeSpecName: "scripts") pod "dd2a9571-5619-4f88-b574-71e3ba931b08" (UID: "dd2a9571-5619-4f88-b574-71e3ba931b08"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.152847 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd2a9571-5619-4f88-b574-71e3ba931b08" (UID: "dd2a9571-5619-4f88-b574-71e3ba931b08"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.255504 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.255541 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nb5b5\" (UniqueName: \"kubernetes.io/projected/dd2a9571-5619-4f88-b574-71e3ba931b08-kube-api-access-nb5b5\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.255552 4861 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.255562 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.255570 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd2a9571-5619-4f88-b574-71e3ba931b08-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.255579 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.255724 4861 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd2a9571-5619-4f88-b574-71e3ba931b08-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.549171 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.664538 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-logs\") pod \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.664945 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-chpmr\" (UniqueName: \"kubernetes.io/projected/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-kube-api-access-chpmr\") pod \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.665061 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-config-data\") pod \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.665084 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-logs" (OuterVolumeSpecName: "logs") pod "b7b5789b-e55e-4ac5-9f7e-c1881c760a28" (UID: "b7b5789b-e55e-4ac5-9f7e-c1881c760a28"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.665126 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-combined-ca-bundle\") pod \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\" (UID: \"b7b5789b-e55e-4ac5-9f7e-c1881c760a28\") " Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.665844 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.674530 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-kube-api-access-chpmr" (OuterVolumeSpecName: "kube-api-access-chpmr") pod "b7b5789b-e55e-4ac5-9f7e-c1881c760a28" (UID: "b7b5789b-e55e-4ac5-9f7e-c1881c760a28"). InnerVolumeSpecName "kube-api-access-chpmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.698134 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-config-data" (OuterVolumeSpecName: "config-data") pod "b7b5789b-e55e-4ac5-9f7e-c1881c760a28" (UID: "b7b5789b-e55e-4ac5-9f7e-c1881c760a28"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.710626 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b7b5789b-e55e-4ac5-9f7e-c1881c760a28" (UID: "b7b5789b-e55e-4ac5-9f7e-c1881c760a28"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.767049 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.767123 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.767135 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-chpmr\" (UniqueName: \"kubernetes.io/projected/b7b5789b-e55e-4ac5-9f7e-c1881c760a28-kube-api-access-chpmr\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.967301 4861 generic.go:334] "Generic (PLEG): container finished" podID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerID="8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977" exitCode=0 Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.967382 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.973304 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.973360 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b7b5789b-e55e-4ac5-9f7e-c1881c760a28","Type":"ContainerDied","Data":"8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977"} Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.973426 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b7b5789b-e55e-4ac5-9f7e-c1881c760a28","Type":"ContainerDied","Data":"3095210f38fa185dc7ee7ad02efc50fbcc2e20b7c1efe22bb4f07aea183ccf4a"} Jan 29 06:57:40 crc kubenswrapper[4861]: I0129 06:57:40.973449 4861 scope.go:117] "RemoveContainer" containerID="8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.006352 4861 scope.go:117] "RemoveContainer" containerID="39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.033744 4861 scope.go:117] "RemoveContainer" containerID="8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.035867 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:57:41 crc kubenswrapper[4861]: E0129 06:57:41.036266 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977\": container with ID starting with 8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977 not found: ID does not exist" containerID="8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.036317 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977"} err="failed to get container status \"8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977\": rpc error: code = NotFound desc = could not find container \"8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977\": container with ID starting with 8a70e5bad29218c6348785f910de09c344b8e6b50c97ad258708336e308aa977 not found: ID does not exist" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.036344 4861 scope.go:117] "RemoveContainer" containerID="39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf" Jan 29 06:57:41 crc kubenswrapper[4861]: E0129 06:57:41.038822 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf\": container with ID starting with 39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf not found: ID does not exist" containerID="39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.038866 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf"} err="failed to get container status \"39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf\": rpc error: code = NotFound desc = could not find container \"39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf\": container with ID starting with 
39b672f5534aec6ba63a25ca81f210fb7454e9a3e83f464d222e245888ab21cf not found: ID does not exist"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.059042 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.087192 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.155900 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="740f35ca-86a8-45d8-86d2-4cbc1ca7e148" path="/var/lib/kubelet/pods/740f35ca-86a8-45d8-86d2-4cbc1ca7e148/volumes"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.156733 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd2a9571-5619-4f88-b574-71e3ba931b08" path="/var/lib/kubelet/pods/dd2a9571-5619-4f88-b574-71e3ba931b08/volumes"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.175552 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.192155 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:57:41 crc kubenswrapper[4861]: E0129 06:57:41.192832 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerName="nova-api-api"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.192879 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerName="nova-api-api"
Jan 29 06:57:41 crc kubenswrapper[4861]: E0129 06:57:41.192950 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerName="nova-api-log"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.192959 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerName="nova-api-log"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.193307 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerName="nova-api-log"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.193362 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" containerName="nova-api-api"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.196850 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.203231 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.206180 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.209219 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.210767 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
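Annotation: once a pod's volumes are unmounted and its API object is removed, kubelet_volumes.go deletes the leftover per-pod directory /var/lib/kubelet/pods/<podUID>/volumes, as the two "Cleaned up orphaned pod volumes dir" entries show. A sketch of that sweep over a hypothetical root directory, not the kubelet's real implementation:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cleanupOrphanedPodDirs removes <root>/<podUID>/volumes for every pod UID
// that is no longer active, echoing "Cleaned up orphaned pod volumes dir".
func cleanupOrphanedPodDirs(root string, active map[string]bool) error {
	entries, err := os.ReadDir(root)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if !e.IsDir() || active[e.Name()] {
			continue
		}
		dir := filepath.Join(root, e.Name(), "volumes")
		if err := os.RemoveAll(dir); err != nil {
			return err
		}
		fmt.Printf("Cleaned up orphaned pod volumes dir podUID=%q path=%q\n", e.Name(), dir)
	}
	return nil
}

func main() {
	root, _ := os.MkdirTemp("", "pods") // stand-in for /var/lib/kubelet/pods
	_ = os.MkdirAll(filepath.Join(root, "740f35ca", "volumes"), 0o755)
	_ = cleanupOrphanedPodDirs(root, map[string]bool{"31f6d03a": true})
}
```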
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.220505 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.220965 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.221234 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.221267 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.229129 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.243614 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.301735 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.301827 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-config-data\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.301900 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-public-tls-certs\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.301974 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pt52\" (UniqueName: \"kubernetes.io/projected/31f6d03a-1759-43cf-9c55-484e4a152714-kube-api-access-5pt52\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.302015 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6qwp\" (UniqueName: \"kubernetes.io/projected/333e76bd-235e-4b74-a6c9-ce702309ec38-kube-api-access-v6qwp\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.302037 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-internal-tls-certs\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0"
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.302060 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-config-data\") pod 
\"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.302113 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-run-httpd\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.302142 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-scripts\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.302164 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.302219 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-log-httpd\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.302260 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31f6d03a-1759-43cf-9c55-484e4a152714-logs\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.302381 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.302415 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404111 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pt52\" (UniqueName: \"kubernetes.io/projected/31f6d03a-1759-43cf-9c55-484e4a152714-kube-api-access-5pt52\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404166 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6qwp\" (UniqueName: \"kubernetes.io/projected/333e76bd-235e-4b74-a6c9-ce702309ec38-kube-api-access-v6qwp\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404186 4861 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-internal-tls-certs\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404201 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-config-data\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404228 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-run-httpd\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404250 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-scripts\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404268 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404313 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-log-httpd\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404338 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31f6d03a-1759-43cf-9c55-484e4a152714-logs\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404370 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404387 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404408 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404433 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-config-data\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.404458 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-public-tls-certs\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.405136 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31f6d03a-1759-43cf-9c55-484e4a152714-logs\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.405480 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-run-httpd\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.406187 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-log-httpd\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.410482 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.410520 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.410629 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-config-data\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.410787 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-scripts\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.411693 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-internal-tls-certs\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.412093 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: 
\"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.414683 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-public-tls-certs\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.414758 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-config-data\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.415330 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.420145 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6qwp\" (UniqueName: \"kubernetes.io/projected/333e76bd-235e-4b74-a6c9-ce702309ec38-kube-api-access-v6qwp\") pod \"ceilometer-0\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.424528 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pt52\" (UniqueName: \"kubernetes.io/projected/31f6d03a-1759-43cf-9c55-484e4a152714-kube-api-access-5pt52\") pod \"nova-api-0\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " pod="openstack/nova-api-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.536193 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.559862 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.848285 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.986428 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31f6d03a-1759-43cf-9c55-484e4a152714","Type":"ContainerStarted","Data":"3bfd44d0d2f578a438ec6fb9a6db7a2a20118714330f1fb3145c01bd33876025"}
Jan 29 06:57:41 crc kubenswrapper[4861]: W0129 06:57:41.990757 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod333e76bd_235e_4b74_a6c9_ce702309ec38.slice/crio-185e63fa8cf97b639bf0524658521d81e89c696f450caaa12871d4748e697b1f WatchSource:0}: Error finding container 185e63fa8cf97b639bf0524658521d81e89c696f450caaa12871d4748e697b1f: Status 404 returned error can't find the container with id 185e63fa8cf97b639bf0524658521d81e89c696f450caaa12871d4748e697b1f
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.993540 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 06:57:41 crc kubenswrapper[4861]: I0129 06:57:41.994757 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 06:57:42 crc kubenswrapper[4861]: I0129 06:57:42.293531 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 06:57:42 crc kubenswrapper[4861]: I0129 06:57:42.327797 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 06:57:42 crc kubenswrapper[4861]: I0129 06:57:42.999117 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"333e76bd-235e-4b74-a6c9-ce702309ec38","Type":"ContainerStarted","Data":"e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e"}
Jan 29 06:57:42 crc kubenswrapper[4861]: I0129 06:57:42.999514 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"333e76bd-235e-4b74-a6c9-ce702309ec38","Type":"ContainerStarted","Data":"185e63fa8cf97b639bf0524658521d81e89c696f450caaa12871d4748e697b1f"}
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.003860 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31f6d03a-1759-43cf-9c55-484e4a152714","Type":"ContainerStarted","Data":"f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834"}
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.003900 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31f6d03a-1759-43cf-9c55-484e4a152714","Type":"ContainerStarted","Data":"3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad"}
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.023869 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.023849395 podStartE2EDuration="2.023849395s" podCreationTimestamp="2026-01-29 06:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:43.018293531 +0000 UTC m=+1354.689788098" watchObservedRunningTime="2026-01-29 06:57:43.023849395 +0000 UTC m=+1354.695343952"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.030087 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.149448 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7b5789b-e55e-4ac5-9f7e-c1881c760a28" path="/var/lib/kubelet/pods/b7b5789b-e55e-4ac5-9f7e-c1881c760a28/volumes"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.238399 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-rh97j"]
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.243049 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.246240 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.248597 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.255456 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-rh97j"]
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.345965 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-config-data\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.346177 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8h4z\" (UniqueName: \"kubernetes.io/projected/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-kube-api-access-p8h4z\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.346438 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.346579 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-scripts\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.448097 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.448226 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-scripts\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.448327 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-config-data\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.448382 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8h4z\" (UniqueName: \"kubernetes.io/projected/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-kube-api-access-p8h4z\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.452918 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.452965 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-config-data\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.453655 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-scripts\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.466912 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8h4z\" (UniqueName: \"kubernetes.io/projected/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-kube-api-access-p8h4z\") pod \"nova-cell1-cell-mapping-rh97j\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") " pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:43 crc kubenswrapper[4861]: I0129 06:57:43.573202 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:44 crc kubenswrapper[4861]: I0129 06:57:44.013138 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"333e76bd-235e-4b74-a6c9-ce702309ec38","Type":"ContainerStarted","Data":"9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea"}
Jan 29 06:57:44 crc kubenswrapper[4861]: I0129 06:57:44.088108 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-rh97j"]
Jan 29 06:57:44 crc kubenswrapper[4861]: I0129 06:57:44.480298 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88"
Jan 29 06:57:44 crc kubenswrapper[4861]: I0129 06:57:44.561429 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-grc22"]
Jan 29 06:57:44 crc kubenswrapper[4861]: I0129 06:57:44.561692 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-647df7b8c5-grc22" podUID="4caf0ea1-b9a5-478e-be1d-d7c48d185348" containerName="dnsmasq-dns" containerID="cri-o://1e34d07e14aab00e720ca02ec07f36951679f3fedc5ba47bee1c985e8b91e13e" gracePeriod=10
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.030097 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rh97j" event={"ID":"d30fad12-ed5e-4fbf-b668-28ef0e6c204c","Type":"ContainerStarted","Data":"6da086fcc4b00f89b80a9fff03917faac0c0299e635e3b23448c43c2f19c72ad"}
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.030406 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rh97j" event={"ID":"d30fad12-ed5e-4fbf-b668-28ef0e6c204c","Type":"ContainerStarted","Data":"d413fd34074e69c6a5825dae6119f91ebdd22ac9c2c0798a7603be943fa2092e"}
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.039831 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"333e76bd-235e-4b74-a6c9-ce702309ec38","Type":"ContainerStarted","Data":"f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754"}
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.042205 4861 generic.go:334] "Generic (PLEG): container finished" podID="4caf0ea1-b9a5-478e-be1d-d7c48d185348" containerID="1e34d07e14aab00e720ca02ec07f36951679f3fedc5ba47bee1c985e8b91e13e" exitCode=0
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.042253 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647df7b8c5-grc22" event={"ID":"4caf0ea1-b9a5-478e-be1d-d7c48d185348","Type":"ContainerDied","Data":"1e34d07e14aab00e720ca02ec07f36951679f3fedc5ba47bee1c985e8b91e13e"}
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.042285 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647df7b8c5-grc22" event={"ID":"4caf0ea1-b9a5-478e-be1d-d7c48d185348","Type":"ContainerDied","Data":"d2ab362d7d883a0d2be0f882d4dc1077f3ef5edba4f31502784aab2243aca5fe"}
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.042311 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d2ab362d7d883a0d2be0f882d4dc1077f3ef5edba4f31502784aab2243aca5fe"
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.046697 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-rh97j" podStartSLOduration=2.046682431 podStartE2EDuration="2.046682431s" podCreationTimestamp="2026-01-29 06:57:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:45.044596807 +0000 UTC m=+1356.716091374" watchObservedRunningTime="2026-01-29 06:57:45.046682431 +0000 UTC m=+1356.718176988"
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.089029 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-647df7b8c5-grc22"
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.180578 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-sb\") pod \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") "
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.180617 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-nb\") pod \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") "
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.180739 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-config\") pod \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") "
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.180886 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-swift-storage-0\") pod \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") "
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.180960 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-svc\") pod \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") "
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.181002 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k56d8\" (UniqueName: \"kubernetes.io/projected/4caf0ea1-b9a5-478e-be1d-d7c48d185348-kube-api-access-k56d8\") pod \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\" (UID: \"4caf0ea1-b9a5-478e-be1d-d7c48d185348\") "
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.187885 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4caf0ea1-b9a5-478e-be1d-d7c48d185348-kube-api-access-k56d8" (OuterVolumeSpecName: "kube-api-access-k56d8") pod "4caf0ea1-b9a5-478e-be1d-d7c48d185348" (UID: "4caf0ea1-b9a5-478e-be1d-d7c48d185348"). InnerVolumeSpecName "kube-api-access-k56d8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.235673 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4caf0ea1-b9a5-478e-be1d-d7c48d185348" (UID: "4caf0ea1-b9a5-478e-be1d-d7c48d185348"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.241166 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4caf0ea1-b9a5-478e-be1d-d7c48d185348" (UID: "4caf0ea1-b9a5-478e-be1d-d7c48d185348"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.247925 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-config" (OuterVolumeSpecName: "config") pod "4caf0ea1-b9a5-478e-be1d-d7c48d185348" (UID: "4caf0ea1-b9a5-478e-be1d-d7c48d185348"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.259698 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4caf0ea1-b9a5-478e-be1d-d7c48d185348" (UID: "4caf0ea1-b9a5-478e-be1d-d7c48d185348"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.279282 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4caf0ea1-b9a5-478e-be1d-d7c48d185348" (UID: "4caf0ea1-b9a5-478e-be1d-d7c48d185348"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.282947 4861 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.282980 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.282990 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k56d8\" (UniqueName: \"kubernetes.io/projected/4caf0ea1-b9a5-478e-be1d-d7c48d185348-kube-api-access-k56d8\") on node \"crc\" DevicePath \"\""
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.283001 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.283009 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 29 06:57:45 crc kubenswrapper[4861]: I0129 06:57:45.283020 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4caf0ea1-b9a5-478e-be1d-d7c48d185348-config\") on node \"crc\" DevicePath \"\""
Jan 29 06:57:46 crc kubenswrapper[4861]: I0129 06:57:46.051333 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-647df7b8c5-grc22"
Jan 29 06:57:46 crc kubenswrapper[4861]: I0129 06:57:46.102590 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-grc22"]
Jan 29 06:57:46 crc kubenswrapper[4861]: I0129 06:57:46.114049 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-grc22"]
Jan 29 06:57:47 crc kubenswrapper[4861]: I0129 06:57:47.067477 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"333e76bd-235e-4b74-a6c9-ce702309ec38","Type":"ContainerStarted","Data":"d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337"}
Jan 29 06:57:47 crc kubenswrapper[4861]: I0129 06:57:47.068355 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 29 06:57:47 crc kubenswrapper[4861]: I0129 06:57:47.102681 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.994849822 podStartE2EDuration="6.102642977s" podCreationTimestamp="2026-01-29 06:57:41 +0000 UTC" firstStartedPulling="2026-01-29 06:57:41.993298155 +0000 UTC m=+1353.664792712" lastFinishedPulling="2026-01-29 06:57:46.1010913 +0000 UTC m=+1357.772585867" observedRunningTime="2026-01-29 06:57:47.096356383 +0000 UTC m=+1358.767850960" watchObservedRunningTime="2026-01-29 06:57:47.102642977 +0000 UTC m=+1358.774137604"
Jan 29 06:57:47 crc kubenswrapper[4861]: I0129 06:57:47.132872 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4caf0ea1-b9a5-478e-be1d-d7c48d185348" path="/var/lib/kubelet/pods/4caf0ea1-b9a5-478e-be1d-d7c48d185348/volumes"
Jan 29 06:57:49 crc kubenswrapper[4861]: I0129 06:57:49.099649 4861 generic.go:334] "Generic (PLEG): container finished" podID="d30fad12-ed5e-4fbf-b668-28ef0e6c204c" containerID="6da086fcc4b00f89b80a9fff03917faac0c0299e635e3b23448c43c2f19c72ad" exitCode=0
Jan 29 06:57:49 crc kubenswrapper[4861]: I0129 06:57:49.102187 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rh97j" event={"ID":"d30fad12-ed5e-4fbf-b668-28ef0e6c204c","Type":"ContainerDied","Data":"6da086fcc4b00f89b80a9fff03917faac0c0299e635e3b23448c43c2f19c72ad"}
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.582345 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.641439 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-config-data\") pod \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") "
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.641496 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-combined-ca-bundle\") pod \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") "
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.641533 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8h4z\" (UniqueName: \"kubernetes.io/projected/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-kube-api-access-p8h4z\") pod \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") "
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.641753 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-scripts\") pod \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\" (UID: \"d30fad12-ed5e-4fbf-b668-28ef0e6c204c\") "
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.649025 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-scripts" (OuterVolumeSpecName: "scripts") pod "d30fad12-ed5e-4fbf-b668-28ef0e6c204c" (UID: "d30fad12-ed5e-4fbf-b668-28ef0e6c204c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.652582 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-kube-api-access-p8h4z" (OuterVolumeSpecName: "kube-api-access-p8h4z") pod "d30fad12-ed5e-4fbf-b668-28ef0e6c204c" (UID: "d30fad12-ed5e-4fbf-b668-28ef0e6c204c"). InnerVolumeSpecName "kube-api-access-p8h4z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.674710 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d30fad12-ed5e-4fbf-b668-28ef0e6c204c" (UID: "d30fad12-ed5e-4fbf-b668-28ef0e6c204c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.676231 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-config-data" (OuterVolumeSpecName: "config-data") pod "d30fad12-ed5e-4fbf-b668-28ef0e6c204c" (UID: "d30fad12-ed5e-4fbf-b668-28ef0e6c204c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.743857 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.743893 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8h4z\" (UniqueName: \"kubernetes.io/projected/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-kube-api-access-p8h4z\") on node \"crc\" DevicePath \"\""
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.743904 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 06:57:50 crc kubenswrapper[4861]: I0129 06:57:50.743913 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d30fad12-ed5e-4fbf-b668-28ef0e6c204c-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.121378 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-rh97j"
Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.128326 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-rh97j" event={"ID":"d30fad12-ed5e-4fbf-b668-28ef0e6c204c","Type":"ContainerDied","Data":"d413fd34074e69c6a5825dae6119f91ebdd22ac9c2c0798a7603be943fa2092e"}
Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.128368 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d413fd34074e69c6a5825dae6119f91ebdd22ac9c2c0798a7603be943fa2092e"
Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.341049 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.341570 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="baec84f8-0437-41da-a5f5-7b88894605eb" containerName="nova-scheduler-scheduler" containerID="cri-o://62a58805c7d853890393b6f9dcd912e589adc6cfb153bd0108fd519065fee75c" gracePeriod=30
Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.421297 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.421920 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="31f6d03a-1759-43cf-9c55-484e4a152714" containerName="nova-api-log" containerID="cri-o://3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad" gracePeriod=30
Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.421965 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="31f6d03a-1759-43cf-9c55-484e4a152714" containerName="nova-api-api" containerID="cri-o://f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834" gracePeriod=30
Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.437344 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.437626 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerName="nova-metadata-log" containerID="cri-o://36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc" gracePeriod=30
containerName="nova-metadata-log" containerID="cri-o://36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc" gracePeriod=30 Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.437762 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerName="nova-metadata-metadata" containerID="cri-o://400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c" gracePeriod=30 Jan 29 06:57:51 crc kubenswrapper[4861]: I0129 06:57:51.982017 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.066275 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31f6d03a-1759-43cf-9c55-484e4a152714-logs\") pod \"31f6d03a-1759-43cf-9c55-484e4a152714\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.066351 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-config-data\") pod \"31f6d03a-1759-43cf-9c55-484e4a152714\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.066499 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-combined-ca-bundle\") pod \"31f6d03a-1759-43cf-9c55-484e4a152714\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.066531 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-internal-tls-certs\") pod \"31f6d03a-1759-43cf-9c55-484e4a152714\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.066568 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5pt52\" (UniqueName: \"kubernetes.io/projected/31f6d03a-1759-43cf-9c55-484e4a152714-kube-api-access-5pt52\") pod \"31f6d03a-1759-43cf-9c55-484e4a152714\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.066627 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-public-tls-certs\") pod \"31f6d03a-1759-43cf-9c55-484e4a152714\" (UID: \"31f6d03a-1759-43cf-9c55-484e4a152714\") " Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.067548 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31f6d03a-1759-43cf-9c55-484e4a152714-logs" (OuterVolumeSpecName: "logs") pod "31f6d03a-1759-43cf-9c55-484e4a152714" (UID: "31f6d03a-1759-43cf-9c55-484e4a152714"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.069762 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31f6d03a-1759-43cf-9c55-484e4a152714-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.072690 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31f6d03a-1759-43cf-9c55-484e4a152714-kube-api-access-5pt52" (OuterVolumeSpecName: "kube-api-access-5pt52") pod "31f6d03a-1759-43cf-9c55-484e4a152714" (UID: "31f6d03a-1759-43cf-9c55-484e4a152714"). InnerVolumeSpecName "kube-api-access-5pt52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.092930 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "31f6d03a-1759-43cf-9c55-484e4a152714" (UID: "31f6d03a-1759-43cf-9c55-484e4a152714"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.105537 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-config-data" (OuterVolumeSpecName: "config-data") pod "31f6d03a-1759-43cf-9c55-484e4a152714" (UID: "31f6d03a-1759-43cf-9c55-484e4a152714"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.125112 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "31f6d03a-1759-43cf-9c55-484e4a152714" (UID: "31f6d03a-1759-43cf-9c55-484e4a152714"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.132757 4861 generic.go:334] "Generic (PLEG): container finished" podID="31f6d03a-1759-43cf-9c55-484e4a152714" containerID="f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834" exitCode=0 Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.132786 4861 generic.go:334] "Generic (PLEG): container finished" podID="31f6d03a-1759-43cf-9c55-484e4a152714" containerID="3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad" exitCode=143 Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.132912 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.133800 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31f6d03a-1759-43cf-9c55-484e4a152714","Type":"ContainerDied","Data":"f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834"} Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.134553 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31f6d03a-1759-43cf-9c55-484e4a152714","Type":"ContainerDied","Data":"3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad"} Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.134581 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31f6d03a-1759-43cf-9c55-484e4a152714","Type":"ContainerDied","Data":"3bfd44d0d2f578a438ec6fb9a6db7a2a20118714330f1fb3145c01bd33876025"} Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.134617 4861 scope.go:117] "RemoveContainer" containerID="f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.137175 4861 generic.go:334] "Generic (PLEG): container finished" podID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerID="36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc" exitCode=143 Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.137230 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9d8957c3-5da1-489d-8e12-f81810d94ada","Type":"ContainerDied","Data":"36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc"} Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.154168 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "31f6d03a-1759-43cf-9c55-484e4a152714" (UID: "31f6d03a-1759-43cf-9c55-484e4a152714"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.163250 4861 scope.go:117] "RemoveContainer" containerID="3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.172281 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.172342 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.172354 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.172363 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5pt52\" (UniqueName: \"kubernetes.io/projected/31f6d03a-1759-43cf-9c55-484e4a152714-kube-api-access-5pt52\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.172394 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f6d03a-1759-43cf-9c55-484e4a152714-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.188187 4861 scope.go:117] "RemoveContainer" containerID="f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834" Jan 29 06:57:52 crc kubenswrapper[4861]: E0129 06:57:52.188770 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834\": container with ID starting with f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834 not found: ID does not exist" containerID="f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.188836 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834"} err="failed to get container status \"f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834\": rpc error: code = NotFound desc = could not find container \"f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834\": container with ID starting with f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834 not found: ID does not exist" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.188878 4861 scope.go:117] "RemoveContainer" containerID="3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad" Jan 29 06:57:52 crc kubenswrapper[4861]: E0129 06:57:52.189422 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad\": container with ID starting with 3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad not found: ID does not exist" containerID="3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.189529 4861 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad"} err="failed to get container status \"3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad\": rpc error: code = NotFound desc = could not find container \"3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad\": container with ID starting with 3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad not found: ID does not exist" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.189614 4861 scope.go:117] "RemoveContainer" containerID="f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.189965 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834"} err="failed to get container status \"f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834\": rpc error: code = NotFound desc = could not find container \"f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834\": container with ID starting with f3be34af7ec13977b0ba87ae16f9fc0a5ffac78e07787d43ebbf370026cc9834 not found: ID does not exist" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.189994 4861 scope.go:117] "RemoveContainer" containerID="3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.190525 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad"} err="failed to get container status \"3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad\": rpc error: code = NotFound desc = could not find container \"3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad\": container with ID starting with 3507f1c1d86a1fe1980e1ad9087684666243f8f88dc1062d4f97c1a4dc20caad not found: ID does not exist" Jan 29 06:57:52 crc kubenswrapper[4861]: E0129 06:57:52.278826 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="62a58805c7d853890393b6f9dcd912e589adc6cfb153bd0108fd519065fee75c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 06:57:52 crc kubenswrapper[4861]: E0129 06:57:52.281054 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="62a58805c7d853890393b6f9dcd912e589adc6cfb153bd0108fd519065fee75c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 06:57:52 crc kubenswrapper[4861]: E0129 06:57:52.282968 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="62a58805c7d853890393b6f9dcd912e589adc6cfb153bd0108fd519065fee75c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 06:57:52 crc kubenswrapper[4861]: E0129 06:57:52.283103 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" 
podUID="baec84f8-0437-41da-a5f5-7b88894605eb" containerName="nova-scheduler-scheduler" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.467022 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.490848 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.500353 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 06:57:52 crc kubenswrapper[4861]: E0129 06:57:52.504702 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4caf0ea1-b9a5-478e-be1d-d7c48d185348" containerName="dnsmasq-dns" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.504735 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4caf0ea1-b9a5-478e-be1d-d7c48d185348" containerName="dnsmasq-dns" Jan 29 06:57:52 crc kubenswrapper[4861]: E0129 06:57:52.504766 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f6d03a-1759-43cf-9c55-484e4a152714" containerName="nova-api-api" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.504779 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f6d03a-1759-43cf-9c55-484e4a152714" containerName="nova-api-api" Jan 29 06:57:52 crc kubenswrapper[4861]: E0129 06:57:52.504808 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f6d03a-1759-43cf-9c55-484e4a152714" containerName="nova-api-log" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.504816 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f6d03a-1759-43cf-9c55-484e4a152714" containerName="nova-api-log" Jan 29 06:57:52 crc kubenswrapper[4861]: E0129 06:57:52.504836 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d30fad12-ed5e-4fbf-b668-28ef0e6c204c" containerName="nova-manage" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.504842 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d30fad12-ed5e-4fbf-b668-28ef0e6c204c" containerName="nova-manage" Jan 29 06:57:52 crc kubenswrapper[4861]: E0129 06:57:52.504860 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4caf0ea1-b9a5-478e-be1d-d7c48d185348" containerName="init" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.504866 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4caf0ea1-b9a5-478e-be1d-d7c48d185348" containerName="init" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.505318 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="31f6d03a-1759-43cf-9c55-484e4a152714" containerName="nova-api-log" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.505340 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4caf0ea1-b9a5-478e-be1d-d7c48d185348" containerName="dnsmasq-dns" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.505378 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="31f6d03a-1759-43cf-9c55-484e4a152714" containerName="nova-api-api" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.505400 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d30fad12-ed5e-4fbf-b668-28ef0e6c204c" containerName="nova-manage" Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.506853 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.510206 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.510271 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.514714 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.550139 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.579023 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-config-data\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.579441 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.579585 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e78be7f2-60d4-4f0e-a510-bf5e652110d1-logs\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.579701 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-public-tls-certs\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.579853 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.579981 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxljp\" (UniqueName: \"kubernetes.io/projected/e78be7f2-60d4-4f0e-a510-bf5e652110d1-kube-api-access-fxljp\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.681626 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.681731 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e78be7f2-60d4-4f0e-a510-bf5e652110d1-logs\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.681794 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-public-tls-certs\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.681845 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.681899 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxljp\" (UniqueName: \"kubernetes.io/projected/e78be7f2-60d4-4f0e-a510-bf5e652110d1-kube-api-access-fxljp\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.681970 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-config-data\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.682417 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e78be7f2-60d4-4f0e-a510-bf5e652110d1-logs\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.687712 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-public-tls-certs\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.688318 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.689982 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.698914 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-config-data\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.709811 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxljp\" (UniqueName: \"kubernetes.io/projected/e78be7f2-60d4-4f0e-a510-bf5e652110d1-kube-api-access-fxljp\") pod \"nova-api-0\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " pod="openstack/nova-api-0"
Jan 29 06:57:52 crc kubenswrapper[4861]: I0129 06:57:52.829769 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 29 06:57:53 crc kubenswrapper[4861]: I0129 06:57:53.129904 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31f6d03a-1759-43cf-9c55-484e4a152714" path="/var/lib/kubelet/pods/31f6d03a-1759-43cf-9c55-484e4a152714/volumes"
Jan 29 06:57:53 crc kubenswrapper[4861]: W0129 06:57:53.303677 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode78be7f2_60d4_4f0e_a510_bf5e652110d1.slice/crio-f60336bd1f36360535654dc36904d81898276cd075922ad637ffe7e1bc175fb6 WatchSource:0}: Error finding container f60336bd1f36360535654dc36904d81898276cd075922ad637ffe7e1bc175fb6: Status 404 returned error can't find the container with id f60336bd1f36360535654dc36904d81898276cd075922ad637ffe7e1bc175fb6
Jan 29 06:57:53 crc kubenswrapper[4861]: I0129 06:57:53.306569 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 29 06:57:54 crc kubenswrapper[4861]: I0129 06:57:54.157645 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e78be7f2-60d4-4f0e-a510-bf5e652110d1","Type":"ContainerStarted","Data":"d3f897c89c2801586a375c91d0d6297c2d965784611ff0abf1834bdaf78b6197"}
Jan 29 06:57:54 crc kubenswrapper[4861]: I0129 06:57:54.157958 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e78be7f2-60d4-4f0e-a510-bf5e652110d1","Type":"ContainerStarted","Data":"0376f68adc682a637da6dfee7b2c102f83dc2d0b2a50def1ae4ad71bb1486b5e"}
Jan 29 06:57:54 crc kubenswrapper[4861]: I0129 06:57:54.157973 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e78be7f2-60d4-4f0e-a510-bf5e652110d1","Type":"ContainerStarted","Data":"f60336bd1f36360535654dc36904d81898276cd075922ad637ffe7e1bc175fb6"}
Jan 29 06:57:54 crc kubenswrapper[4861]: I0129 06:57:54.188531 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.188506301 podStartE2EDuration="2.188506301s" podCreationTimestamp="2026-01-29 06:57:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:54.184943356 +0000 UTC m=+1365.856437973" watchObservedRunningTime="2026-01-29 06:57:54.188506301 +0000 UTC m=+1365.860000858"
Jan 29 06:57:54 crc kubenswrapper[4861]: I0129 06:57:54.583743 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": read tcp 10.217.0.2:44358->10.217.0.200:8775: read: connection reset by peer"
Jan 29 06:57:54 crc kubenswrapper[4861]: I0129 06:57:54.583871 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": read tcp 10.217.0.2:44364->10.217.0.200:8775: read: connection reset by peer"
Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.089835 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.168441 4861 generic.go:334] "Generic (PLEG): container finished" podID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerID="400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c" exitCode=0 Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.168894 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.169374 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9d8957c3-5da1-489d-8e12-f81810d94ada","Type":"ContainerDied","Data":"400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c"} Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.169399 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9d8957c3-5da1-489d-8e12-f81810d94ada","Type":"ContainerDied","Data":"2ec6893d4cc59eeb115973204fa34fae6ee99fe4abffae20736e856f3887bbf9"} Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.169414 4861 scope.go:117] "RemoveContainer" containerID="400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.195997 4861 scope.go:117] "RemoveContainer" containerID="36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.215333 4861 scope.go:117] "RemoveContainer" containerID="400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c" Jan 29 06:57:55 crc kubenswrapper[4861]: E0129 06:57:55.215708 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c\": container with ID starting with 400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c not found: ID does not exist" containerID="400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.215753 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c"} err="failed to get container status \"400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c\": rpc error: code = NotFound desc = could not find container \"400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c\": container with ID starting with 400bbaf3f46d30ec87ea75d89115c88b45bf1d15d8d0c602e215675ed2b6f83c not found: ID does not exist" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.215782 4861 scope.go:117] "RemoveContainer" containerID="36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc" Jan 29 06:57:55 crc kubenswrapper[4861]: E0129 06:57:55.216058 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc\": container with ID starting with 36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc not found: ID does not exist" containerID="36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.216094 4861 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc"} err="failed to get container status \"36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc\": rpc error: code = NotFound desc = could not find container \"36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc\": container with ID starting with 36a7ff8c7df643515f62f7fac0828626a3cbafa7330611b528e302020f8394bc not found: ID does not exist" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.230894 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-config-data\") pod \"9d8957c3-5da1-489d-8e12-f81810d94ada\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.230950 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-nova-metadata-tls-certs\") pod \"9d8957c3-5da1-489d-8e12-f81810d94ada\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.231181 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-combined-ca-bundle\") pod \"9d8957c3-5da1-489d-8e12-f81810d94ada\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.231224 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d8957c3-5da1-489d-8e12-f81810d94ada-logs\") pod \"9d8957c3-5da1-489d-8e12-f81810d94ada\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.231252 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsm2n\" (UniqueName: \"kubernetes.io/projected/9d8957c3-5da1-489d-8e12-f81810d94ada-kube-api-access-gsm2n\") pod \"9d8957c3-5da1-489d-8e12-f81810d94ada\" (UID: \"9d8957c3-5da1-489d-8e12-f81810d94ada\") " Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.232170 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d8957c3-5da1-489d-8e12-f81810d94ada-logs" (OuterVolumeSpecName: "logs") pod "9d8957c3-5da1-489d-8e12-f81810d94ada" (UID: "9d8957c3-5da1-489d-8e12-f81810d94ada"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.237845 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d8957c3-5da1-489d-8e12-f81810d94ada-kube-api-access-gsm2n" (OuterVolumeSpecName: "kube-api-access-gsm2n") pod "9d8957c3-5da1-489d-8e12-f81810d94ada" (UID: "9d8957c3-5da1-489d-8e12-f81810d94ada"). InnerVolumeSpecName "kube-api-access-gsm2n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.260739 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-config-data" (OuterVolumeSpecName: "config-data") pod "9d8957c3-5da1-489d-8e12-f81810d94ada" (UID: "9d8957c3-5da1-489d-8e12-f81810d94ada"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.280354 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d8957c3-5da1-489d-8e12-f81810d94ada" (UID: "9d8957c3-5da1-489d-8e12-f81810d94ada"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.290788 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "9d8957c3-5da1-489d-8e12-f81810d94ada" (UID: "9d8957c3-5da1-489d-8e12-f81810d94ada"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.333522 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.333838 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d8957c3-5da1-489d-8e12-f81810d94ada-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.333856 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsm2n\" (UniqueName: \"kubernetes.io/projected/9d8957c3-5da1-489d-8e12-f81810d94ada-kube-api-access-gsm2n\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.333872 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.333889 4861 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d8957c3-5da1-489d-8e12-f81810d94ada-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.510501 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.523410 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.541232 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:55 crc kubenswrapper[4861]: E0129 06:57:55.541985 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerName="nova-metadata-metadata" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.542025 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerName="nova-metadata-metadata" Jan 29 06:57:55 crc kubenswrapper[4861]: E0129 06:57:55.542061 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerName="nova-metadata-log" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.542100 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" 
containerName="nova-metadata-log" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.542430 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerName="nova-metadata-log" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.542499 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" containerName="nova-metadata-metadata" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.544187 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.557037 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.557491 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.565506 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.640505 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-logs\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.640823 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-config-data\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.641017 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctgbt\" (UniqueName: \"kubernetes.io/projected/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-kube-api-access-ctgbt\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.641151 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.641387 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.743530 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctgbt\" (UniqueName: \"kubernetes.io/projected/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-kube-api-access-ctgbt\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.743576 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.743663 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.743692 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-logs\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.743721 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-config-data\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.744704 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-logs\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.748758 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-config-data\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.748994 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.750939 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.763954 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctgbt\" (UniqueName: \"kubernetes.io/projected/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-kube-api-access-ctgbt\") pod \"nova-metadata-0\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " pod="openstack/nova-metadata-0" Jan 29 06:57:55 crc kubenswrapper[4861]: I0129 06:57:55.894737 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.180485 4861 generic.go:334] "Generic (PLEG): container finished" podID="baec84f8-0437-41da-a5f5-7b88894605eb" containerID="62a58805c7d853890393b6f9dcd912e589adc6cfb153bd0108fd519065fee75c" exitCode=0 Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.180565 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"baec84f8-0437-41da-a5f5-7b88894605eb","Type":"ContainerDied","Data":"62a58805c7d853890393b6f9dcd912e589adc6cfb153bd0108fd519065fee75c"} Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.208010 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.253167 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-config-data\") pod \"baec84f8-0437-41da-a5f5-7b88894605eb\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.253354 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gg7p7\" (UniqueName: \"kubernetes.io/projected/baec84f8-0437-41da-a5f5-7b88894605eb-kube-api-access-gg7p7\") pod \"baec84f8-0437-41da-a5f5-7b88894605eb\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.253375 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-combined-ca-bundle\") pod \"baec84f8-0437-41da-a5f5-7b88894605eb\" (UID: \"baec84f8-0437-41da-a5f5-7b88894605eb\") " Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.257990 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/baec84f8-0437-41da-a5f5-7b88894605eb-kube-api-access-gg7p7" (OuterVolumeSpecName: "kube-api-access-gg7p7") pod "baec84f8-0437-41da-a5f5-7b88894605eb" (UID: "baec84f8-0437-41da-a5f5-7b88894605eb"). InnerVolumeSpecName "kube-api-access-gg7p7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.278808 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-config-data" (OuterVolumeSpecName: "config-data") pod "baec84f8-0437-41da-a5f5-7b88894605eb" (UID: "baec84f8-0437-41da-a5f5-7b88894605eb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.280234 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "baec84f8-0437-41da-a5f5-7b88894605eb" (UID: "baec84f8-0437-41da-a5f5-7b88894605eb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.356554 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gg7p7\" (UniqueName: \"kubernetes.io/projected/baec84f8-0437-41da-a5f5-7b88894605eb-kube-api-access-gg7p7\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.356588 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.356601 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/baec84f8-0437-41da-a5f5-7b88894605eb-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:57:56 crc kubenswrapper[4861]: I0129 06:57:56.422434 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:57:56 crc kubenswrapper[4861]: W0129 06:57:56.428516 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0f81cf5_e2b0_43da_af6e_de9c1d3c8aed.slice/crio-78f1c9bf2f659a883e25f8dbd870de96e32a2713d76aceed4dc8cc3c9e561f18 WatchSource:0}: Error finding container 78f1c9bf2f659a883e25f8dbd870de96e32a2713d76aceed4dc8cc3c9e561f18: Status 404 returned error can't find the container with id 78f1c9bf2f659a883e25f8dbd870de96e32a2713d76aceed4dc8cc3c9e561f18 Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.131877 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d8957c3-5da1-489d-8e12-f81810d94ada" path="/var/lib/kubelet/pods/9d8957c3-5da1-489d-8e12-f81810d94ada/volumes" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.196366 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"baec84f8-0437-41da-a5f5-7b88894605eb","Type":"ContainerDied","Data":"6ac861e78068334500e10f56df1418c49120feb0a093737cfded2a91c7aa0eb4"} Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.196427 4861 scope.go:117] "RemoveContainer" containerID="62a58805c7d853890393b6f9dcd912e589adc6cfb153bd0108fd519065fee75c" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.196442 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.202520 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed","Type":"ContainerStarted","Data":"114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194"} Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.202605 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed","Type":"ContainerStarted","Data":"d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea"} Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.202634 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed","Type":"ContainerStarted","Data":"78f1c9bf2f659a883e25f8dbd870de96e32a2713d76aceed4dc8cc3c9e561f18"} Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.245375 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.245348197 podStartE2EDuration="2.245348197s" podCreationTimestamp="2026-01-29 06:57:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:57.226403595 +0000 UTC m=+1368.897898212" watchObservedRunningTime="2026-01-29 06:57:57.245348197 +0000 UTC m=+1368.916842784" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.275292 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.285921 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.294960 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:57:57 crc kubenswrapper[4861]: E0129 06:57:57.295511 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baec84f8-0437-41da-a5f5-7b88894605eb" containerName="nova-scheduler-scheduler" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.295579 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="baec84f8-0437-41da-a5f5-7b88894605eb" containerName="nova-scheduler-scheduler" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.295825 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="baec84f8-0437-41da-a5f5-7b88894605eb" containerName="nova-scheduler-scheduler" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.296464 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.299239 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.303901 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.378725 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.378953 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvjjs\" (UniqueName: \"kubernetes.io/projected/a39b615c-006f-43b4-9f38-0fe1d0814696-kube-api-access-xvjjs\") pod \"nova-scheduler-0\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.379129 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-config-data\") pod \"nova-scheduler-0\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.481414 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.481482 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvjjs\" (UniqueName: \"kubernetes.io/projected/a39b615c-006f-43b4-9f38-0fe1d0814696-kube-api-access-xvjjs\") pod \"nova-scheduler-0\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.481589 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-config-data\") pod \"nova-scheduler-0\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.487374 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.494161 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-config-data\") pod \"nova-scheduler-0\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.515006 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvjjs\" (UniqueName: 
\"kubernetes.io/projected/a39b615c-006f-43b4-9f38-0fe1d0814696-kube-api-access-xvjjs\") pod \"nova-scheduler-0\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " pod="openstack/nova-scheduler-0" Jan 29 06:57:57 crc kubenswrapper[4861]: I0129 06:57:57.626124 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 06:57:58 crc kubenswrapper[4861]: I0129 06:57:58.061223 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:57:58 crc kubenswrapper[4861]: W0129 06:57:58.067708 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda39b615c_006f_43b4_9f38_0fe1d0814696.slice/crio-dc165dd5df1f6d54ffd77bbe445c5b305514d69159bdd24d25e717fe0c7f5126 WatchSource:0}: Error finding container dc165dd5df1f6d54ffd77bbe445c5b305514d69159bdd24d25e717fe0c7f5126: Status 404 returned error can't find the container with id dc165dd5df1f6d54ffd77bbe445c5b305514d69159bdd24d25e717fe0c7f5126 Jan 29 06:57:58 crc kubenswrapper[4861]: I0129 06:57:58.217704 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a39b615c-006f-43b4-9f38-0fe1d0814696","Type":"ContainerStarted","Data":"dc165dd5df1f6d54ffd77bbe445c5b305514d69159bdd24d25e717fe0c7f5126"} Jan 29 06:57:59 crc kubenswrapper[4861]: I0129 06:57:59.135040 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="baec84f8-0437-41da-a5f5-7b88894605eb" path="/var/lib/kubelet/pods/baec84f8-0437-41da-a5f5-7b88894605eb/volumes" Jan 29 06:57:59 crc kubenswrapper[4861]: I0129 06:57:59.235500 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a39b615c-006f-43b4-9f38-0fe1d0814696","Type":"ContainerStarted","Data":"10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149"} Jan 29 06:57:59 crc kubenswrapper[4861]: I0129 06:57:59.267877 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.267855977 podStartE2EDuration="2.267855977s" podCreationTimestamp="2026-01-29 06:57:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 06:57:59.253782241 +0000 UTC m=+1370.925276818" watchObservedRunningTime="2026-01-29 06:57:59.267855977 +0000 UTC m=+1370.939350544" Jan 29 06:58:00 crc kubenswrapper[4861]: I0129 06:58:00.895817 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 06:58:00 crc kubenswrapper[4861]: I0129 06:58:00.896290 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 06:58:02 crc kubenswrapper[4861]: I0129 06:58:02.626904 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 06:58:02 crc kubenswrapper[4861]: I0129 06:58:02.830623 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 06:58:02 crc kubenswrapper[4861]: I0129 06:58:02.831173 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 06:58:03 crc kubenswrapper[4861]: I0129 06:58:03.842286 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerName="nova-api-api" probeResult="failure" output="Get 
\"https://10.217.0.209:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 06:58:03 crc kubenswrapper[4861]: I0129 06:58:03.842304 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.209:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.507287 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wczkh"] Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.509346 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.530001 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wczkh"] Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.565702 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-catalog-content\") pod \"redhat-operators-wczkh\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.565887 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b64fk\" (UniqueName: \"kubernetes.io/projected/f7dfeca3-8fa1-4323-aab9-13f91619ec59-kube-api-access-b64fk\") pod \"redhat-operators-wczkh\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.565966 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-utilities\") pod \"redhat-operators-wczkh\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.667514 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b64fk\" (UniqueName: \"kubernetes.io/projected/f7dfeca3-8fa1-4323-aab9-13f91619ec59-kube-api-access-b64fk\") pod \"redhat-operators-wczkh\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.667591 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-utilities\") pod \"redhat-operators-wczkh\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.667685 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-catalog-content\") pod \"redhat-operators-wczkh\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.668211 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-catalog-content\") pod \"redhat-operators-wczkh\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.668607 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-utilities\") pod \"redhat-operators-wczkh\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.688208 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b64fk\" (UniqueName: \"kubernetes.io/projected/f7dfeca3-8fa1-4323-aab9-13f91619ec59-kube-api-access-b64fk\") pod \"redhat-operators-wczkh\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.845961 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.895223 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 06:58:05 crc kubenswrapper[4861]: I0129 06:58:05.895265 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 06:58:06 crc kubenswrapper[4861]: I0129 06:58:06.777776 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wczkh"] Jan 29 06:58:06 crc kubenswrapper[4861]: W0129 06:58:06.781224 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7dfeca3_8fa1_4323_aab9_13f91619ec59.slice/crio-666edcbc8b340c7a6b84386d45c282abd9454249b763fdd3ead23f94d9987590 WatchSource:0}: Error finding container 666edcbc8b340c7a6b84386d45c282abd9454249b763fdd3ead23f94d9987590: Status 404 returned error can't find the container with id 666edcbc8b340c7a6b84386d45c282abd9454249b763fdd3ead23f94d9987590 Jan 29 06:58:06 crc kubenswrapper[4861]: I0129 06:58:06.908289 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 06:58:06 crc kubenswrapper[4861]: I0129 06:58:06.908296 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 06:58:07 crc kubenswrapper[4861]: I0129 06:58:07.313590 4861 generic.go:334] "Generic (PLEG): container finished" podID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerID="a8d524158947f924eab40d9bdb95ed7ad3624f2945d85b75f870de2e69a72ece" exitCode=0 Jan 29 06:58:07 crc kubenswrapper[4861]: I0129 06:58:07.313648 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wczkh" 
event={"ID":"f7dfeca3-8fa1-4323-aab9-13f91619ec59","Type":"ContainerDied","Data":"a8d524158947f924eab40d9bdb95ed7ad3624f2945d85b75f870de2e69a72ece"} Jan 29 06:58:07 crc kubenswrapper[4861]: I0129 06:58:07.313683 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wczkh" event={"ID":"f7dfeca3-8fa1-4323-aab9-13f91619ec59","Type":"ContainerStarted","Data":"666edcbc8b340c7a6b84386d45c282abd9454249b763fdd3ead23f94d9987590"} Jan 29 06:58:07 crc kubenswrapper[4861]: I0129 06:58:07.626627 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 06:58:07 crc kubenswrapper[4861]: I0129 06:58:07.654028 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 06:58:08 crc kubenswrapper[4861]: I0129 06:58:08.333965 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wczkh" event={"ID":"f7dfeca3-8fa1-4323-aab9-13f91619ec59","Type":"ContainerStarted","Data":"f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591"} Jan 29 06:58:08 crc kubenswrapper[4861]: I0129 06:58:08.387600 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 06:58:10 crc kubenswrapper[4861]: I0129 06:58:10.361096 4861 generic.go:334] "Generic (PLEG): container finished" podID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerID="f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591" exitCode=0 Jan 29 06:58:10 crc kubenswrapper[4861]: I0129 06:58:10.361196 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wczkh" event={"ID":"f7dfeca3-8fa1-4323-aab9-13f91619ec59","Type":"ContainerDied","Data":"f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591"} Jan 29 06:58:11 crc kubenswrapper[4861]: I0129 06:58:11.651852 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 29 06:58:12 crc kubenswrapper[4861]: I0129 06:58:12.382454 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wczkh" event={"ID":"f7dfeca3-8fa1-4323-aab9-13f91619ec59","Type":"ContainerStarted","Data":"380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9"} Jan 29 06:58:12 crc kubenswrapper[4861]: I0129 06:58:12.413160 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wczkh" podStartSLOduration=2.978926737 podStartE2EDuration="7.413127684s" podCreationTimestamp="2026-01-29 06:58:05 +0000 UTC" firstStartedPulling="2026-01-29 06:58:07.315273308 +0000 UTC m=+1378.986767865" lastFinishedPulling="2026-01-29 06:58:11.749474215 +0000 UTC m=+1383.420968812" observedRunningTime="2026-01-29 06:58:12.408863932 +0000 UTC m=+1384.080358499" watchObservedRunningTime="2026-01-29 06:58:12.413127684 +0000 UTC m=+1384.084622281" Jan 29 06:58:12 crc kubenswrapper[4861]: I0129 06:58:12.842862 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 06:58:12 crc kubenswrapper[4861]: I0129 06:58:12.846625 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 06:58:12 crc kubenswrapper[4861]: I0129 06:58:12.854633 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 06:58:12 crc kubenswrapper[4861]: I0129 06:58:12.858211 4861 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 06:58:13 crc kubenswrapper[4861]: I0129 06:58:13.391330 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 06:58:13 crc kubenswrapper[4861]: I0129 06:58:13.400934 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 06:58:15 crc kubenswrapper[4861]: I0129 06:58:15.846692 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:15 crc kubenswrapper[4861]: I0129 06:58:15.847064 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:15 crc kubenswrapper[4861]: I0129 06:58:15.905245 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 06:58:15 crc kubenswrapper[4861]: I0129 06:58:15.905911 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 06:58:15 crc kubenswrapper[4861]: I0129 06:58:15.910057 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 06:58:16 crc kubenswrapper[4861]: I0129 06:58:16.435779 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 06:58:16 crc kubenswrapper[4861]: I0129 06:58:16.889469 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wczkh" podUID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerName="registry-server" probeResult="failure" output=< Jan 29 06:58:16 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 06:58:16 crc kubenswrapper[4861]: > Jan 29 06:58:26 crc kubenswrapper[4861]: I0129 06:58:26.893247 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wczkh" podUID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerName="registry-server" probeResult="failure" output=< Jan 29 06:58:26 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 06:58:26 crc kubenswrapper[4861]: > Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.372456 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-3399-account-create-update-w9zr8"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.373921 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-3399-account-create-update-w9zr8" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.403190 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qc7r5\" (UniqueName: \"kubernetes.io/projected/3949b327-31a1-4dfa-bc04-e13b6c033ecd-kube-api-access-qc7r5\") pod \"cinder-3399-account-create-update-w9zr8\" (UID: \"3949b327-31a1-4dfa-bc04-e13b6c033ecd\") " pod="openstack/cinder-3399-account-create-update-w9zr8" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.403362 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3949b327-31a1-4dfa-bc04-e13b6c033ecd-operator-scripts\") pod \"cinder-3399-account-create-update-w9zr8\" (UID: \"3949b327-31a1-4dfa-bc04-e13b6c033ecd\") " pod="openstack/cinder-3399-account-create-update-w9zr8" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.420008 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.436041 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3399-account-create-update-w9zr8"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.462755 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-zh9b7"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.463978 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zh9b7" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.498338 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.504937 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3949b327-31a1-4dfa-bc04-e13b6c033ecd-operator-scripts\") pod \"cinder-3399-account-create-update-w9zr8\" (UID: \"3949b327-31a1-4dfa-bc04-e13b6c033ecd\") " pod="openstack/cinder-3399-account-create-update-w9zr8" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.505013 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qc7r5\" (UniqueName: \"kubernetes.io/projected/3949b327-31a1-4dfa-bc04-e13b6c033ecd-kube-api-access-qc7r5\") pod \"cinder-3399-account-create-update-w9zr8\" (UID: \"3949b327-31a1-4dfa-bc04-e13b6c033ecd\") " pod="openstack/cinder-3399-account-create-update-w9zr8" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.505086 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts\") pod \"root-account-create-update-zh9b7\" (UID: \"441b7714-dd72-448b-a5a8-b9f56057da43\") " pod="openstack/root-account-create-update-zh9b7" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.505105 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2jsc\" (UniqueName: \"kubernetes.io/projected/441b7714-dd72-448b-a5a8-b9f56057da43-kube-api-access-l2jsc\") pod \"root-account-create-update-zh9b7\" (UID: \"441b7714-dd72-448b-a5a8-b9f56057da43\") " pod="openstack/root-account-create-update-zh9b7" Jan 29 06:58:32 crc 
kubenswrapper[4861]: I0129 06:58:32.505675 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3949b327-31a1-4dfa-bc04-e13b6c033ecd-operator-scripts\") pod \"cinder-3399-account-create-update-w9zr8\" (UID: \"3949b327-31a1-4dfa-bc04-e13b6c033ecd\") " pod="openstack/cinder-3399-account-create-update-w9zr8" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.523372 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zh9b7"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.547926 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-3399-account-create-update-sg992"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.575390 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-3399-account-create-update-sg992"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.604791 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qc7r5\" (UniqueName: \"kubernetes.io/projected/3949b327-31a1-4dfa-bc04-e13b6c033ecd-kube-api-access-qc7r5\") pod \"cinder-3399-account-create-update-w9zr8\" (UID: \"3949b327-31a1-4dfa-bc04-e13b6c033ecd\") " pod="openstack/cinder-3399-account-create-update-w9zr8" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.607119 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts\") pod \"root-account-create-update-zh9b7\" (UID: \"441b7714-dd72-448b-a5a8-b9f56057da43\") " pod="openstack/root-account-create-update-zh9b7" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.607163 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2jsc\" (UniqueName: \"kubernetes.io/projected/441b7714-dd72-448b-a5a8-b9f56057da43-kube-api-access-l2jsc\") pod \"root-account-create-update-zh9b7\" (UID: \"441b7714-dd72-448b-a5a8-b9f56057da43\") " pod="openstack/root-account-create-update-zh9b7" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.608588 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts\") pod \"root-account-create-update-zh9b7\" (UID: \"441b7714-dd72-448b-a5a8-b9f56057da43\") " pod="openstack/root-account-create-update-zh9b7" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.646174 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-98fb-account-create-update-jv6v4"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.647326 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-98fb-account-create-update-jv6v4" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.661939 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.678830 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2jsc\" (UniqueName: \"kubernetes.io/projected/441b7714-dd72-448b-a5a8-b9f56057da43-kube-api-access-l2jsc\") pod \"root-account-create-update-zh9b7\" (UID: \"441b7714-dd72-448b-a5a8-b9f56057da43\") " pod="openstack/root-account-create-update-zh9b7" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.695483 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3399-account-create-update-w9zr8" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.746256 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-98fb-account-create-update-jv6v4"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.780548 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zh9b7" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.781768 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-n8bb6"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.820888 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66a7721f-92e9-498c-97b8-cbe9890220d9-operator-scripts\") pod \"placement-98fb-account-create-update-jv6v4\" (UID: \"66a7721f-92e9-498c-97b8-cbe9890220d9\") " pod="openstack/placement-98fb-account-create-update-jv6v4" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.821050 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58tc6\" (UniqueName: \"kubernetes.io/projected/66a7721f-92e9-498c-97b8-cbe9890220d9-kube-api-access-58tc6\") pod \"placement-98fb-account-create-update-jv6v4\" (UID: \"66a7721f-92e9-498c-97b8-cbe9890220d9\") " pod="openstack/placement-98fb-account-create-update-jv6v4" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.858164 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.858379 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="4c1315db-486b-4b63-bdb0-630c247d49b4" containerName="openstackclient" containerID="cri-o://46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b" gracePeriod=2 Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.884584 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.914306 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-n8bb6"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.931490 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58tc6\" (UniqueName: \"kubernetes.io/projected/66a7721f-92e9-498c-97b8-cbe9890220d9-kube-api-access-58tc6\") pod \"placement-98fb-account-create-update-jv6v4\" (UID: \"66a7721f-92e9-498c-97b8-cbe9890220d9\") " pod="openstack/placement-98fb-account-create-update-jv6v4" Jan 29 06:58:32 crc 
kubenswrapper[4861]: I0129 06:58:32.931562 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66a7721f-92e9-498c-97b8-cbe9890220d9-operator-scripts\") pod \"placement-98fb-account-create-update-jv6v4\" (UID: \"66a7721f-92e9-498c-97b8-cbe9890220d9\") " pod="openstack/placement-98fb-account-create-update-jv6v4" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.932296 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66a7721f-92e9-498c-97b8-cbe9890220d9-operator-scripts\") pod \"placement-98fb-account-create-update-jv6v4\" (UID: \"66a7721f-92e9-498c-97b8-cbe9890220d9\") " pod="openstack/placement-98fb-account-create-update-jv6v4" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.955543 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-c008-account-create-update-cjb9h"] Jan 29 06:58:32 crc kubenswrapper[4861]: E0129 06:58:32.957506 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c1315db-486b-4b63-bdb0-630c247d49b4" containerName="openstackclient" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.957539 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c1315db-486b-4b63-bdb0-630c247d49b4" containerName="openstackclient" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.957761 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c1315db-486b-4b63-bdb0-630c247d49b4" containerName="openstackclient" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.960254 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-c008-account-create-update-cjb9h" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.970199 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-98fb-account-create-update-cwrzk"] Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.985570 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 29 06:58:32 crc kubenswrapper[4861]: I0129 06:58:32.989182 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-98fb-account-create-update-cwrzk"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.012198 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58tc6\" (UniqueName: \"kubernetes.io/projected/66a7721f-92e9-498c-97b8-cbe9890220d9-kube-api-access-58tc6\") pod \"placement-98fb-account-create-update-jv6v4\" (UID: \"66a7721f-92e9-498c-97b8-cbe9890220d9\") " pod="openstack/placement-98fb-account-create-update-jv6v4" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.021220 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-c008-account-create-update-cjb9h"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.049210 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-733f-account-create-update-v8gq6"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.050537 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-733f-account-create-update-v8gq6" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.055505 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-98fb-account-create-update-jv6v4" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.064728 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.082532 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-733f-account-create-update-v8gq6"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.143092 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7d75aa6-1d18-4901-874c-e6b9db142421-operator-scripts\") pod \"nova-api-733f-account-create-update-v8gq6\" (UID: \"b7d75aa6-1d18-4901-874c-e6b9db142421\") " pod="openstack/nova-api-733f-account-create-update-v8gq6" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.143165 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f95675a-f692-4c29-90da-01eda11003ac-operator-scripts\") pod \"glance-c008-account-create-update-cjb9h\" (UID: \"1f95675a-f692-4c29-90da-01eda11003ac\") " pod="openstack/glance-c008-account-create-update-cjb9h" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.143228 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8stx\" (UniqueName: \"kubernetes.io/projected/1f95675a-f692-4c29-90da-01eda11003ac-kube-api-access-z8stx\") pod \"glance-c008-account-create-update-cjb9h\" (UID: \"1f95675a-f692-4c29-90da-01eda11003ac\") " pod="openstack/glance-c008-account-create-update-cjb9h" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.143256 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9fsj\" (UniqueName: \"kubernetes.io/projected/b7d75aa6-1d18-4901-874c-e6b9db142421-kube-api-access-s9fsj\") pod \"nova-api-733f-account-create-update-v8gq6\" (UID: \"b7d75aa6-1d18-4901-874c-e6b9db142421\") " pod="openstack/nova-api-733f-account-create-update-v8gq6" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.159456 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46b65d05-b04a-4e60-b22f-d47aa0ef69e4" path="/var/lib/kubelet/pods/46b65d05-b04a-4e60-b22f-d47aa0ef69e4/volumes" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.159983 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bc1bcf4-064f-4460-8d8f-5e619d79dbba" path="/var/lib/kubelet/pods/7bc1bcf4-064f-4460-8d8f-5e619d79dbba/volumes" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.169914 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b379c23e-2bef-4aa8-b656-e6c152261562" path="/var/lib/kubelet/pods/b379c23e-2bef-4aa8-b656-e6c152261562/volumes" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.170510 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-1f01-account-create-update-874c5"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.179635 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1f01-account-create-update-874c5" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.183871 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.201345 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-c008-account-create-update-7xcsr"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.230970 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-c008-account-create-update-7xcsr"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.243890 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f95675a-f692-4c29-90da-01eda11003ac-operator-scripts\") pod \"glance-c008-account-create-update-cjb9h\" (UID: \"1f95675a-f692-4c29-90da-01eda11003ac\") " pod="openstack/glance-c008-account-create-update-cjb9h" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.243958 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8stx\" (UniqueName: \"kubernetes.io/projected/1f95675a-f692-4c29-90da-01eda11003ac-kube-api-access-z8stx\") pod \"glance-c008-account-create-update-cjb9h\" (UID: \"1f95675a-f692-4c29-90da-01eda11003ac\") " pod="openstack/glance-c008-account-create-update-cjb9h" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.243985 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7hgd\" (UniqueName: \"kubernetes.io/projected/b9c95c6e-e80b-4d39-8209-1dbd1c237351-kube-api-access-n7hgd\") pod \"nova-cell0-1f01-account-create-update-874c5\" (UID: \"b9c95c6e-e80b-4d39-8209-1dbd1c237351\") " pod="openstack/nova-cell0-1f01-account-create-update-874c5" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.244007 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9fsj\" (UniqueName: \"kubernetes.io/projected/b7d75aa6-1d18-4901-874c-e6b9db142421-kube-api-access-s9fsj\") pod \"nova-api-733f-account-create-update-v8gq6\" (UID: \"b7d75aa6-1d18-4901-874c-e6b9db142421\") " pod="openstack/nova-api-733f-account-create-update-v8gq6" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.244033 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9c95c6e-e80b-4d39-8209-1dbd1c237351-operator-scripts\") pod \"nova-cell0-1f01-account-create-update-874c5\" (UID: \"b9c95c6e-e80b-4d39-8209-1dbd1c237351\") " pod="openstack/nova-cell0-1f01-account-create-update-874c5" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.244134 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7d75aa6-1d18-4901-874c-e6b9db142421-operator-scripts\") pod \"nova-api-733f-account-create-update-v8gq6\" (UID: \"b7d75aa6-1d18-4901-874c-e6b9db142421\") " pod="openstack/nova-api-733f-account-create-update-v8gq6" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.244736 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7d75aa6-1d18-4901-874c-e6b9db142421-operator-scripts\") pod \"nova-api-733f-account-create-update-v8gq6\" (UID: \"b7d75aa6-1d18-4901-874c-e6b9db142421\") " 
pod="openstack/nova-api-733f-account-create-update-v8gq6" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.245210 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f95675a-f692-4c29-90da-01eda11003ac-operator-scripts\") pod \"glance-c008-account-create-update-cjb9h\" (UID: \"1f95675a-f692-4c29-90da-01eda11003ac\") " pod="openstack/glance-c008-account-create-update-cjb9h" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.282648 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9fsj\" (UniqueName: \"kubernetes.io/projected/b7d75aa6-1d18-4901-874c-e6b9db142421-kube-api-access-s9fsj\") pod \"nova-api-733f-account-create-update-v8gq6\" (UID: \"b7d75aa6-1d18-4901-874c-e6b9db142421\") " pod="openstack/nova-api-733f-account-create-update-v8gq6" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.285606 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8stx\" (UniqueName: \"kubernetes.io/projected/1f95675a-f692-4c29-90da-01eda11003ac-kube-api-access-z8stx\") pod \"glance-c008-account-create-update-cjb9h\" (UID: \"1f95675a-f692-4c29-90da-01eda11003ac\") " pod="openstack/glance-c008-account-create-update-cjb9h" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.309736 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-c008-account-create-update-cjb9h" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.310256 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1f01-account-create-update-874c5"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.334475 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.335602 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" containerName="openstack-network-exporter" containerID="cri-o://bdb5e7afda9ae61df28a1d053e02b61d09e0d2ad8787a4805fb51623eb383c1d" gracePeriod=300 Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.350574 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7hgd\" (UniqueName: \"kubernetes.io/projected/b9c95c6e-e80b-4d39-8209-1dbd1c237351-kube-api-access-n7hgd\") pod \"nova-cell0-1f01-account-create-update-874c5\" (UID: \"b9c95c6e-e80b-4d39-8209-1dbd1c237351\") " pod="openstack/nova-cell0-1f01-account-create-update-874c5" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.350621 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9c95c6e-e80b-4d39-8209-1dbd1c237351-operator-scripts\") pod \"nova-cell0-1f01-account-create-update-874c5\" (UID: \"b9c95c6e-e80b-4d39-8209-1dbd1c237351\") " pod="openstack/nova-cell0-1f01-account-create-update-874c5" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.351320 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9c95c6e-e80b-4d39-8209-1dbd1c237351-operator-scripts\") pod \"nova-cell0-1f01-account-create-update-874c5\" (UID: \"b9c95c6e-e80b-4d39-8209-1dbd1c237351\") " pod="openstack/nova-cell0-1f01-account-create-update-874c5" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.386264 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7hgd\" (UniqueName: \"kubernetes.io/projected/b9c95c6e-e80b-4d39-8209-1dbd1c237351-kube-api-access-n7hgd\") pod \"nova-cell0-1f01-account-create-update-874c5\" (UID: \"b9c95c6e-e80b-4d39-8209-1dbd1c237351\") " pod="openstack/nova-cell0-1f01-account-create-update-874c5" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.386475 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-733f-account-create-update-v8gq6" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.421003 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-733f-account-create-update-rmh9j"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.463479 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-733f-account-create-update-rmh9j"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.480857 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" containerName="ovsdbserver-sb" containerID="cri-o://0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83" gracePeriod=300 Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.514888 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-20e6-account-create-update-wrx86"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.521501 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-20e6-account-create-update-wrx86" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.521817 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1f01-account-create-update-874c5" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.530383 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.552561 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-20e6-account-create-update-wrx86"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.554762 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmgqg\" (UniqueName: \"kubernetes.io/projected/9a38ce6f-f3ff-4976-8acb-9576d89df924-kube-api-access-gmgqg\") pod \"barbican-20e6-account-create-update-wrx86\" (UID: \"9a38ce6f-f3ff-4976-8acb-9576d89df924\") " pod="openstack/barbican-20e6-account-create-update-wrx86" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.554840 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a38ce6f-f3ff-4976-8acb-9576d89df924-operator-scripts\") pod \"barbican-20e6-account-create-update-wrx86\" (UID: \"9a38ce6f-f3ff-4976-8acb-9576d89df924\") " pod="openstack/barbican-20e6-account-create-update-wrx86" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.562867 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-0391-account-create-update-jq5xd"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.567007 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-0391-account-create-update-jq5xd" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.578515 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.606173 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.623249 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-0391-account-create-update-jq5xd"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.656935 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmgqg\" (UniqueName: \"kubernetes.io/projected/9a38ce6f-f3ff-4976-8acb-9576d89df924-kube-api-access-gmgqg\") pod \"barbican-20e6-account-create-update-wrx86\" (UID: \"9a38ce6f-f3ff-4976-8acb-9576d89df924\") " pod="openstack/barbican-20e6-account-create-update-wrx86" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.656998 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a38ce6f-f3ff-4976-8acb-9576d89df924-operator-scripts\") pod \"barbican-20e6-account-create-update-wrx86\" (UID: \"9a38ce6f-f3ff-4976-8acb-9576d89df924\") " pod="openstack/barbican-20e6-account-create-update-wrx86" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.660826 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a38ce6f-f3ff-4976-8acb-9576d89df924-operator-scripts\") pod \"barbican-20e6-account-create-update-wrx86\" (UID: \"9a38ce6f-f3ff-4976-8acb-9576d89df924\") " pod="openstack/barbican-20e6-account-create-update-wrx86" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.692890 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_a1432e17-a4e2-4cde-a3d7-89eddf9973e1/ovsdbserver-sb/0.log" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.692937 4861 generic.go:334] "Generic (PLEG): container finished" podID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" containerID="bdb5e7afda9ae61df28a1d053e02b61d09e0d2ad8787a4805fb51623eb383c1d" exitCode=2 Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.692955 4861 generic.go:334] "Generic (PLEG): container finished" podID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" containerID="0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83" exitCode=143 Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.692976 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"a1432e17-a4e2-4cde-a3d7-89eddf9973e1","Type":"ContainerDied","Data":"bdb5e7afda9ae61df28a1d053e02b61d09e0d2ad8787a4805fb51623eb383c1d"} Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.693026 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"a1432e17-a4e2-4cde-a3d7-89eddf9973e1","Type":"ContainerDied","Data":"0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83"} Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.728156 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.728926 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="3f8ce486-c345-41aa-b641-b7c4ef27ecfe" 
containerName="openstack-network-exporter" containerID="cri-o://162f0b5fe401e18a951e9523bbee70e5964f24419ec2081475b2ab90051cf4b6" gracePeriod=300 Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.734779 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmgqg\" (UniqueName: \"kubernetes.io/projected/9a38ce6f-f3ff-4976-8acb-9576d89df924-kube-api-access-gmgqg\") pod \"barbican-20e6-account-create-update-wrx86\" (UID: \"9a38ce6f-f3ff-4976-8acb-9576d89df924\") " pod="openstack/barbican-20e6-account-create-update-wrx86" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.743155 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-1f01-account-create-update-4jqs4"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.755733 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-1f01-account-create-update-4jqs4"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.759048 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpmbm\" (UniqueName: \"kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm\") pod \"nova-cell1-0391-account-create-update-jq5xd\" (UID: \"66f8ecd1-e1dd-4663-8681-59fe89a02691\") " pod="openstack/nova-cell1-0391-account-create-update-jq5xd" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.759142 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts\") pod \"nova-cell1-0391-account-create-update-jq5xd\" (UID: \"66f8ecd1-e1dd-4663-8681-59fe89a02691\") " pod="openstack/nova-cell1-0391-account-create-update-jq5xd" Jan 29 06:58:33 crc kubenswrapper[4861]: E0129 06:58:33.759831 4861 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 06:58:33 crc kubenswrapper[4861]: E0129 06:58:33.759891 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data podName:3b8b1385-123a-4b60-af39-82d6492a65c2 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:34.259876242 +0000 UTC m=+1405.931370799 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data") pod "rabbitmq-cell1-server-0" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2") : configmap "rabbitmq-cell1-config-data" not found Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.766404 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-dfed-account-create-update-t7cnb"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.767930 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dfed-account-create-update-t7cnb" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.771141 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.812118 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dfed-account-create-update-t7cnb"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.820277 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-c5jkq"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.833133 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-20e6-account-create-update-g8kvq"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.857797 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-c5jkq"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.860697 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fll29\" (UniqueName: \"kubernetes.io/projected/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-kube-api-access-fll29\") pod \"neutron-dfed-account-create-update-t7cnb\" (UID: \"46d0aaff-25ca-4605-9b55-ee1f5a897ff2\") " pod="openstack/neutron-dfed-account-create-update-t7cnb" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.860913 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpmbm\" (UniqueName: \"kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm\") pod \"nova-cell1-0391-account-create-update-jq5xd\" (UID: \"66f8ecd1-e1dd-4663-8681-59fe89a02691\") " pod="openstack/nova-cell1-0391-account-create-update-jq5xd" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.861575 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts\") pod \"nova-cell1-0391-account-create-update-jq5xd\" (UID: \"66f8ecd1-e1dd-4663-8681-59fe89a02691\") " pod="openstack/nova-cell1-0391-account-create-update-jq5xd" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.861664 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-operator-scripts\") pod \"neutron-dfed-account-create-update-t7cnb\" (UID: \"46d0aaff-25ca-4605-9b55-ee1f5a897ff2\") " pod="openstack/neutron-dfed-account-create-update-t7cnb" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.862729 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts\") pod \"nova-cell1-0391-account-create-update-jq5xd\" (UID: \"66f8ecd1-e1dd-4663-8681-59fe89a02691\") " pod="openstack/nova-cell1-0391-account-create-update-jq5xd" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.874284 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-20e6-account-create-update-g8kvq"] Jan 29 06:58:33 crc kubenswrapper[4861]: E0129 06:58:33.875331 4861 projected.go:194] Error preparing data for projected volume kube-api-access-fpmbm for pod openstack/nova-cell1-0391-account-create-update-jq5xd: failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Jan 29 06:58:33 crc 
kubenswrapper[4861]: E0129 06:58:33.875396 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm podName:66f8ecd1-e1dd-4663-8681-59fe89a02691 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:34.375377869 +0000 UTC m=+1406.046872426 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-fpmbm" (UniqueName: "kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm") pod "nova-cell1-0391-account-create-update-jq5xd" (UID: "66f8ecd1-e1dd-4663-8681-59fe89a02691") : failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Jan 29 06:58:33 crc kubenswrapper[4861]: E0129 06:58:33.894986 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:33 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: if [ -n "" ]; then Jan 29 06:58:33 crc kubenswrapper[4861]: GRANT_DATABASE="" Jan 29 06:58:33 crc kubenswrapper[4861]: else Jan 29 06:58:33 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:33 crc kubenswrapper[4861]: fi Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:33 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:33 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:33 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:33 crc kubenswrapper[4861]: # support updates Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:33 crc kubenswrapper[4861]: E0129 06:58:33.895912 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:33 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: if [ -n "cinder" ]; then Jan 29 06:58:33 crc kubenswrapper[4861]: GRANT_DATABASE="cinder" Jan 29 06:58:33 crc kubenswrapper[4861]: else Jan 29 06:58:33 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:33 crc kubenswrapper[4861]: fi Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:33 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:33 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:33 crc kubenswrapper[4861]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:33 crc kubenswrapper[4861]: # support updates Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:33 crc kubenswrapper[4861]: E0129 06:58:33.896042 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-zh9b7" podUID="441b7714-dd72-448b-a5a8-b9f56057da43" Jan 29 06:58:33 crc kubenswrapper[4861]: E0129 06:58:33.897352 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-3399-account-create-update-w9zr8" podUID="3949b327-31a1-4dfa-bc04-e13b6c033ecd" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.905326 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-0391-account-create-update-9sb8t"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.921859 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-0391-account-create-update-9sb8t"] Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.932187 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="3f8ce486-c345-41aa-b641-b7c4ef27ecfe" containerName="ovsdbserver-nb" containerID="cri-o://99b14fb198d584fcdb36a249a3809f469be4cd3e4644d253366d9f932150b9eb" gracePeriod=300 Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 
06:58:33.949170 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-dfed-account-create-update-hvzjq"] Jan 29 06:58:33 crc kubenswrapper[4861]: E0129 06:58:33.970780 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:33 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: if [ -n "placement" ]; then Jan 29 06:58:33 crc kubenswrapper[4861]: GRANT_DATABASE="placement" Jan 29 06:58:33 crc kubenswrapper[4861]: else Jan 29 06:58:33 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:33 crc kubenswrapper[4861]: fi Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:33 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:33 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:33 crc kubenswrapper[4861]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:33 crc kubenswrapper[4861]: # support updates Jan 29 06:58:33 crc kubenswrapper[4861]: Jan 29 06:58:33 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:33 crc kubenswrapper[4861]: E0129 06:58:33.971886 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"placement-db-secret\\\" not found\"" pod="openstack/placement-98fb-account-create-update-jv6v4" podUID="66a7721f-92e9-498c-97b8-cbe9890220d9" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.989505 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-20e6-account-create-update-wrx86" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.990012 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-operator-scripts\") pod \"neutron-dfed-account-create-update-t7cnb\" (UID: \"46d0aaff-25ca-4605-9b55-ee1f5a897ff2\") " pod="openstack/neutron-dfed-account-create-update-t7cnb" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.990233 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fll29\" (UniqueName: \"kubernetes.io/projected/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-kube-api-access-fll29\") pod \"neutron-dfed-account-create-update-t7cnb\" (UID: \"46d0aaff-25ca-4605-9b55-ee1f5a897ff2\") " pod="openstack/neutron-dfed-account-create-update-t7cnb" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.990967 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-operator-scripts\") pod \"neutron-dfed-account-create-update-t7cnb\" (UID: \"46d0aaff-25ca-4605-9b55-ee1f5a897ff2\") " pod="openstack/neutron-dfed-account-create-update-t7cnb" Jan 29 06:58:33 crc kubenswrapper[4861]: I0129 06:58:33.997208 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-g89td"] Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.012010 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1432e17_a4e2_4cde_a3d7_89eddf9973e1.slice/crio-conmon-bdb5e7afda9ae61df28a1d053e02b61d09e0d2ad8787a4805fb51623eb383c1d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1432e17_a4e2_4cde_a3d7_89eddf9973e1.slice/crio-conmon-0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1432e17_a4e2_4cde_a3d7_89eddf9973e1.slice/crio-bdb5e7afda9ae61df28a1d053e02b61d09e0d2ad8787a4805fb51623eb383c1d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1432e17_a4e2_4cde_a3d7_89eddf9973e1.slice/crio-0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83.scope\": RecentStats: unable to find data in memory cache]" Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.039693 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-g89td"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.051703 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-dfed-account-create-update-hvzjq"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.057700 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fll29\" (UniqueName: \"kubernetes.io/projected/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-kube-api-access-fll29\") pod \"neutron-dfed-account-create-update-t7cnb\" (UID: \"46d0aaff-25ca-4605-9b55-ee1f5a897ff2\") " pod="openstack/neutron-dfed-account-create-update-t7cnb" Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.062317 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 
29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.062567 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="93a9df75-0ea9-457b-84f0-17b95d5dcced" containerName="ovn-northd" containerID="cri-o://82d4301c7f8e1b6d25f3d60567395a1ff2635c17934b75e63917065ada770d83" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.062799 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="93a9df75-0ea9-457b-84f0-17b95d5dcced" containerName="openstack-network-exporter" containerID="cri-o://015f72c114bfd8ca01ff83fcfd7253c5311da2c4dbfdaa591c7feb5e53a0693d" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.115086 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-psncj"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.153822 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-psncj"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.161540 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dfed-account-create-update-t7cnb" Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.197528 4861 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.197603 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts podName:66f8ecd1-e1dd-4663-8681-59fe89a02691 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:34.697585079 +0000 UTC m=+1406.369079636 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts") pod "nova-cell1-0391-account-create-update-jq5xd" (UID: "66f8ecd1-e1dd-4663-8681-59fe89a02691") : configmap "openstack-cell1-scripts" not found Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.220631 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-h8qb2"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.220929 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-h8qb2" podUID="a871f110-29fe-4e80-b339-5209aebc0652" containerName="openstack-network-exporter" containerID="cri-o://96077c6516cf385f97eadb5e32fbb191b2f9052a56daddb824c1bc8fcead61a3" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.246001 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-6n7w9"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.298218 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-z5wvn"] Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.301498 4861 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.301591 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data podName:3b8b1385-123a-4b60-af39-82d6492a65c2 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:35.301572921 +0000 UTC m=+1406.973067478 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data") pod "rabbitmq-cell1-server-0" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2") : configmap "rabbitmq-cell1-config-data" not found Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.319961 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-zbm2j"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.330134 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-zbm2j"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.344750 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-ktt88"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.345028 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" podUID="86dd900f-d608-496c-91b3-be95d914cf58" containerName="dnsmasq-dns" containerID="cri-o://a8543a52cd59c6b1f2aa0887f12875b0d96e617f0a1ae249e3c7261009db9ece" gracePeriod=10 Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.356599 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:34 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: if [ -n "glance" ]; then Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="glance" Jan 29 06:58:34 crc kubenswrapper[4861]: else Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:34 crc kubenswrapper[4861]: fi Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:34 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:34 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:34 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:34 crc kubenswrapper[4861]: # support updates Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.358190 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-rh97j"] Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.358281 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"glance-db-secret\\\" not found\"" pod="openstack/glance-c008-account-create-update-cjb9h" podUID="1f95675a-f692-4c29-90da-01eda11003ac" Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.371135 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-rh97j"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.384117 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3399-account-create-update-w9zr8"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.394804 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-x75d6"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.403818 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpmbm\" (UniqueName: \"kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm\") pod \"nova-cell1-0391-account-create-update-jq5xd\" (UID: \"66f8ecd1-e1dd-4663-8681-59fe89a02691\") " pod="openstack/nova-cell1-0391-account-create-update-jq5xd" Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.407038 4861 projected.go:194] Error preparing data for projected volume kube-api-access-fpmbm for pod openstack/nova-cell1-0391-account-create-update-jq5xd: failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.407117 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm podName:66f8ecd1-e1dd-4663-8681-59fe89a02691 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:35.407102239 +0000 UTC m=+1407.078596786 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-fpmbm" (UniqueName: "kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm") pod "nova-cell1-0391-account-create-update-jq5xd" (UID: "66f8ecd1-e1dd-4663-8681-59fe89a02691") : failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.417121 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-x75d6"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.431137 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-5j2wv"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.440320 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-5j2wv"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.448084 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zh9b7"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.459082 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.470825 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-98fb-account-create-update-jv6v4"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.473631 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-8b6sf"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.492235 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" podUID="86dd900f-d608-496c-91b3-be95d914cf58" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.204:5353: connect: connection refused" Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.504701 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-8b6sf"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.580847 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.581314 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-server" containerID="cri-o://27fc60fdd9d503cf21c40b0704ab3f668d5965d81d74cbbc4c3aa6e2ce528d23" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.584228 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-server" containerID="cri-o://696299cd0fa4bb5069c2910a6be63baa743730b8326a70bb3ffd8aa9d1c825ec" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.584699 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="swift-recon-cron" containerID="cri-o://0f6ad2c5dcab8a4a865c78703af5ec17abaa3949c079878716530d1bf7fd0391" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.584743 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="rsync" containerID="cri-o://849976197be27f3f0414f54d8c975813716a50cdd59b37975b4eb4bb0b453c69" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 
06:58:34.584794 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-expirer" containerID="cri-o://80b76bbf5574a1ffe9a28896fadf09a48689fb5bb78991c8c124528c6850d0ee" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.584863 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-updater" containerID="cri-o://300cdcb844a68c46fd719e6be6e862e7b417f885d1ab7289bf038801298b0951" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.584903 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-auditor" containerID="cri-o://e2a3b495086295e31b7ed56c3d2932e3f985fccd26d8e9e239e77653b59a0d32" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.584963 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-replicator" containerID="cri-o://7fed2197542cb4f3117973c4387005866a5b3aa792d7b6f414b399fca8226503" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.584999 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-server" containerID="cri-o://5f0359b2c69c9a01c0a74bbb8ecc34b7cb21acbd0a142f267a70aaf243d0d4d1" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.585026 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-updater" containerID="cri-o://3281266ddbd401b2f04a7cb7e231cd35c5bced4b7f65472c79c6cab82698c818" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.585054 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-auditor" containerID="cri-o://18d6ec1b3d371c36c925fb4104455a8183e0a1995e0abd435a9954ffab121835" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.585096 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-replicator" containerID="cri-o://42fe23b69a4684b68ede63233c8ea85578f5383ad1505896e099548f6e44a6ea" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.585134 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-auditor" containerID="cri-o://c2889e92275d93552a69c4569021d1f48b14b5ad80332e996fa65c8fc322719d" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.585163 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-reaper" containerID="cri-o://f247857b6eb8ade45650fa7fc5c2b6bff1ac506097b24f9f3cdf86be8a43d2d4" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.585194 4861 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-replicator" containerID="cri-o://90931a6cfdb8a44357367186d2c4396fd4c9ac22d948ca358a02706b89784468" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.616459 4861 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.616506 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data podName:5966cedc-8ab5-4390-906b-c5ac39333e09 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:35.116493797 +0000 UTC m=+1406.787988354 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data") pod "rabbitmq-server-0" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09") : configmap "rabbitmq-config-data" not found Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.644869 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.645117 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b1c31da3-c703-4d07-82e5-b02fe841a548" containerName="cinder-scheduler" containerID="cri-o://fd9675094dbc5e4671db27d2d11399c30b5682eca9316dcb9802ca14217ef8f4" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.645512 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b1c31da3-c703-4d07-82e5-b02fe841a548" containerName="probe" containerID="cri-o://9fd75cdc74ab09059f227b09ddff0f8e7c83f3521d26bb3444075ede07ae852b" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.663635 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-6957679874-pnq22"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.663919 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-6957679874-pnq22" podUID="4b488de3-67a5-49cf-a61a-37a44acbbe19" containerName="placement-log" containerID="cri-o://403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.664359 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-6957679874-pnq22" podUID="4b488de3-67a5-49cf-a61a-37a44acbbe19" containerName="placement-api" containerID="cri-o://64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.722147 4861 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.722264 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts podName:66f8ecd1-e1dd-4663-8681-59fe89a02691 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:35.722249061 +0000 UTC m=+1407.393743618 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts") pod "nova-cell1-0391-account-create-update-jq5xd" (UID: "66f8ecd1-e1dd-4663-8681-59fe89a02691") : configmap "openstack-cell1-scripts" not found Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.764824 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.765041 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="6c25bacb-4105-4fa4-a798-117f9cbe75fe" containerName="cinder-api-log" containerID="cri-o://1040f23ce9435abf9d98fe86eda2bd1c172d7b64d769b973274d71099bd7ad84" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.765325 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="6c25bacb-4105-4fa4-a798-117f9cbe75fe" containerName="cinder-api" containerID="cri-o://dfde65f746a284f0742bfc416b14134fe608d8f0ca69edc5ab5445ada8954bbe" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.786638 4861 generic.go:334] "Generic (PLEG): container finished" podID="93a9df75-0ea9-457b-84f0-17b95d5dcced" containerID="015f72c114bfd8ca01ff83fcfd7253c5311da2c4dbfdaa591c7feb5e53a0693d" exitCode=2 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.786746 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"93a9df75-0ea9-457b-84f0-17b95d5dcced","Type":"ContainerDied","Data":"015f72c114bfd8ca01ff83fcfd7253c5311da2c4dbfdaa591c7feb5e53a0693d"} Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.812978 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-h8qb2_a871f110-29fe-4e80-b339-5209aebc0652/openstack-network-exporter/0.log" Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.813362 4861 generic.go:334] "Generic (PLEG): container finished" podID="a871f110-29fe-4e80-b339-5209aebc0652" containerID="96077c6516cf385f97eadb5e32fbb191b2f9052a56daddb824c1bc8fcead61a3" exitCode=2 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.813458 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-h8qb2" event={"ID":"a871f110-29fe-4e80-b339-5209aebc0652","Type":"ContainerDied","Data":"96077c6516cf385f97eadb5e32fbb191b2f9052a56daddb824c1bc8fcead61a3"} Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.820630 4861 generic.go:334] "Generic (PLEG): container finished" podID="86dd900f-d608-496c-91b3-be95d914cf58" containerID="a8543a52cd59c6b1f2aa0887f12875b0d96e617f0a1ae249e3c7261009db9ece" exitCode=0 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.820710 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" event={"ID":"86dd900f-d608-496c-91b3-be95d914cf58","Type":"ContainerDied","Data":"a8543a52cd59c6b1f2aa0887f12875b0d96e617f0a1ae249e3c7261009db9ece"} Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.824286 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_3f8ce486-c345-41aa-b641-b7c4ef27ecfe/ovsdbserver-nb/0.log" Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.824347 4861 generic.go:334] "Generic (PLEG): container finished" podID="3f8ce486-c345-41aa-b641-b7c4ef27ecfe" containerID="162f0b5fe401e18a951e9523bbee70e5964f24419ec2081475b2ab90051cf4b6" exitCode=2 Jan 29 06:58:34 crc 
kubenswrapper[4861]: I0129 06:58:34.824365 4861 generic.go:334] "Generic (PLEG): container finished" podID="3f8ce486-c345-41aa-b641-b7c4ef27ecfe" containerID="99b14fb198d584fcdb36a249a3809f469be4cd3e4644d253366d9f932150b9eb" exitCode=143 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.824460 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"3f8ce486-c345-41aa-b641-b7c4ef27ecfe","Type":"ContainerDied","Data":"162f0b5fe401e18a951e9523bbee70e5964f24419ec2081475b2ab90051cf4b6"} Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.824511 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"3f8ce486-c345-41aa-b641-b7c4ef27ecfe","Type":"ContainerDied","Data":"99b14fb198d584fcdb36a249a3809f469be4cd3e4644d253366d9f932150b9eb"} Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.842686 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-ngprs"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.842725 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-98fb-account-create-update-jv6v4" event={"ID":"66a7721f-92e9-498c-97b8-cbe9890220d9","Type":"ContainerStarted","Data":"ef908f445913aafc1c18fb5d053f6266894656d2d7f42a5a7bf2bee15946578c"} Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.849591 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-ngprs"] Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.851687 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:34 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: if [ -n "placement" ]; then Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="placement" Jan 29 06:58:34 crc kubenswrapper[4861]: else Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:34 crc kubenswrapper[4861]: fi Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:34 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:34 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:34 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:34 crc kubenswrapper[4861]: # support updates Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.851760 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zh9b7" event={"ID":"441b7714-dd72-448b-a5a8-b9f56057da43","Type":"ContainerStarted","Data":"b1abb4c232dd163ca21fd48e8a560d095fe73abcd9496bafe43db977fec11631"} Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.852014 4861 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/root-account-create-update-zh9b7" secret="" err="secret \"galera-openstack-cell1-dockercfg-dt4gs\" not found" Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.852857 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"placement-db-secret\\\" not found\"" pod="openstack/placement-98fb-account-create-update-jv6v4" podUID="66a7721f-92e9-498c-97b8-cbe9890220d9" Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.855931 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:34 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: if [ -n "" ]; then Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="" Jan 29 06:58:34 crc kubenswrapper[4861]: else Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:34 crc kubenswrapper[4861]: fi Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:34 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:34 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:34 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:34 crc kubenswrapper[4861]: # support updates Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.859321 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-zh9b7" podUID="441b7714-dd72-448b-a5a8-b9f56057da43" Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.860120 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3399-account-create-update-w9zr8" event={"ID":"3949b327-31a1-4dfa-bc04-e13b6c033ecd","Type":"ContainerStarted","Data":"1f6ac1e97f55e920c21c072eeadbbb6450b90bfe5bd5506f6a15752b0332258b"} Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.867604 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c008-account-create-update-cjb9h" event={"ID":"1f95675a-f692-4c29-90da-01eda11003ac","Type":"ContainerStarted","Data":"54b795e324a242b47be3700c2be33ac4d04851fc95a8acea2639174edb259e21"} Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.868136 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:34 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: if [ -n "cinder" ]; then Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="cinder" Jan 29 06:58:34 crc kubenswrapper[4861]: else Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:34 crc kubenswrapper[4861]: fi Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:34 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:34 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:34 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:34 crc kubenswrapper[4861]: # support updates Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.868584 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:34 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: if [ -n "nova_api" ]; then Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="nova_api" Jan 29 06:58:34 crc kubenswrapper[4861]: else Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:34 crc kubenswrapper[4861]: fi Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:34 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:34 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:34 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:34 crc kubenswrapper[4861]: # support updates Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.870983 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-3399-account-create-update-w9zr8" podUID="3949b327-31a1-4dfa-bc04-e13b6c033ecd" Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.871028 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-api-db-secret\\\" not found\"" pod="openstack/nova-api-733f-account-create-update-v8gq6" podUID="b7d75aa6-1d18-4901-874c-e6b9db142421" Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.871511 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:34 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: if [ -n "glance" ]; then Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="glance" Jan 29 06:58:34 crc kubenswrapper[4861]: else Jan 29 06:58:34 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:34 crc kubenswrapper[4861]: fi Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:34 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:34 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:34 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:34 crc kubenswrapper[4861]: # support updates Jan 29 06:58:34 crc kubenswrapper[4861]: Jan 29 06:58:34 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.872946 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"glance-db-secret\\\" not found\"" pod="openstack/glance-c008-account-create-update-cjb9h" podUID="1f95675a-f692-4c29-90da-01eda11003ac" Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.896629 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-3399-account-create-update-w9zr8"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.896705 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.896881 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5ce76094-c71f-46c7-a69d-7d30d8540c5a" containerName="glance-log" containerID="cri-o://1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.910654 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5ce76094-c71f-46c7-a69d-7d30d8540c5a" containerName="glance-httpd" containerID="cri-o://c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235" gracePeriod=30 Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.943050 4861 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.943100 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83 is running failed: container process not found" containerID="0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83" cmd=["/usr/bin/pidof","ovsdb-server"] Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.943172 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts podName:441b7714-dd72-448b-a5a8-b9f56057da43 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:35.443157623 +0000 UTC m=+1407.114652180 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts") pod "root-account-create-update-zh9b7" (UID: "441b7714-dd72-448b-a5a8-b9f56057da43") : configmap "openstack-cell1-scripts" not found
Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.951461 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.951841 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="10b22efc-707a-4ffc-8edc-44c39900ba2b" containerName="glance-log" containerID="cri-o://f4608109881c8d879d0747a15002faa7be33fa42a0ab54b3b737788b5adb25d7" gracePeriod=30
Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.951971 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="10b22efc-707a-4ffc-8edc-44c39900ba2b" containerName="glance-httpd" containerID="cri-o://9dd5b64d9ec144641a738b5aa4db658de3394d4fbf3ece8178a1881822e737bf" gracePeriod=30
Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.952340 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_a1432e17-a4e2-4cde-a3d7-89eddf9973e1/ovsdbserver-sb/0.log"
Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.952395 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.948991 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83 is running failed: container process not found" containerID="0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.962322 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83 is running failed: container process not found" containerID="0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 29 06:58:34 crc kubenswrapper[4861]: E0129 06:58:34.962421 4861 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83 is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-sb-0" podUID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" containerName="ovsdbserver-sb"
Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.963682 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_a1432e17-a4e2-4cde-a3d7-89eddf9973e1/ovsdbserver-sb/0.log"
Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.963755 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"a1432e17-a4e2-4cde-a3d7-89eddf9973e1","Type":"ContainerDied","Data":"93e991e2aeb051baab726270af043589852809398efd615d37fbe49ad5ff6ce4"}
Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.963794 4861 scope.go:117] "RemoveContainer" containerID="bdb5e7afda9ae61df28a1d053e02b61d09e0d2ad8787a4805fb51623eb383c1d"
Jan 29 06:58:34 crc kubenswrapper[4861]: I0129 06:58:34.982502 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-7lshn"]
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.000474 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_3f8ce486-c345-41aa-b641-b7c4ef27ecfe/ovsdbserver-nb/0.log"
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.000555 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.046547 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-scripts\") pod \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") "
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.047032 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-combined-ca-bundle\") pod \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") "
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.047156 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") "
Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.055151 4861 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=<
Jan 29 06:58:35 crc kubenswrapper[4861]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 29 06:58:35 crc kubenswrapper[4861]: + source /usr/local/bin/container-scripts/functions
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNBridge=br-int
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNRemote=tcp:localhost:6642
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNEncapType=geneve
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNAvailabilityZones=
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ EnableChassisAsGateway=true
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ PhysicalNetworks=
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNHostName=
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ ovs_dir=/var/lib/openvswitch
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 06:58:35 crc kubenswrapper[4861]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 06:58:35 crc kubenswrapper[4861]: + sleep 0.5
Jan 29 06:58:35 crc kubenswrapper[4861]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 06:58:35 crc kubenswrapper[4861]: + cleanup_ovsdb_server_semaphore
Jan 29 06:58:35 crc kubenswrapper[4861]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 06:58:35 crc kubenswrapper[4861]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 29 06:58:35 crc kubenswrapper[4861]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-6n7w9" message=<
Jan 29 06:58:35 crc kubenswrapper[4861]: Exiting ovsdb-server (5) [ OK ]
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 29 06:58:35 crc kubenswrapper[4861]: + source /usr/local/bin/container-scripts/functions
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNBridge=br-int
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNRemote=tcp:localhost:6642
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNEncapType=geneve
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNAvailabilityZones=
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ EnableChassisAsGateway=true
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ PhysicalNetworks=
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNHostName=
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ ovs_dir=/var/lib/openvswitch
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 06:58:35 crc kubenswrapper[4861]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 06:58:35 crc kubenswrapper[4861]: + sleep 0.5
Jan 29 06:58:35 crc kubenswrapper[4861]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 06:58:35 crc kubenswrapper[4861]: + cleanup_ovsdb_server_semaphore
Jan 29 06:58:35 crc kubenswrapper[4861]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 06:58:35 crc kubenswrapper[4861]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 29 06:58:35 crc kubenswrapper[4861]: >
Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.055273 4861 kuberuntime_container.go:691] "PreStop hook failed" err=<
Jan 29 06:58:35 crc kubenswrapper[4861]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 29 06:58:35 crc kubenswrapper[4861]: + source /usr/local/bin/container-scripts/functions
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNBridge=br-int
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNRemote=tcp:localhost:6642
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNEncapType=geneve
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNAvailabilityZones=
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ EnableChassisAsGateway=true
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ PhysicalNetworks=
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ OVNHostName=
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ ovs_dir=/var/lib/openvswitch
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 29 06:58:35 crc kubenswrapper[4861]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 06:58:35 crc kubenswrapper[4861]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 06:58:35 crc kubenswrapper[4861]: + sleep 0.5
Jan 29 06:58:35 crc kubenswrapper[4861]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 06:58:35 crc kubenswrapper[4861]: + cleanup_ovsdb_server_semaphore
Jan 29 06:58:35 crc kubenswrapper[4861]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 06:58:35 crc kubenswrapper[4861]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 29 06:58:35 crc kubenswrapper[4861]: > pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server" containerID="cri-o://df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52"
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.055346 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server" containerID="cri-o://df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" gracePeriod=30
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.055593 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-7lshn"]
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.055812 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nngmc\" (UniqueName: \"kubernetes.io/projected/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-kube-api-access-nngmc\") pod \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") "
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.055866 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdbserver-sb-tls-certs\") pod \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") "
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.055935 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-metrics-certs-tls-certs\") pod \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") "
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.056183 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdb-rundir\") pod \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") "
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.056277 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-config\") pod \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\" (UID: \"a1432e17-a4e2-4cde-a3d7-89eddf9973e1\") "
Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.058733 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-config" (OuterVolumeSpecName: "config") pod "a1432e17-a4e2-4cde-a3d7-89eddf9973e1" (UID: "a1432e17-a4e2-4cde-a3d7-89eddf9973e1"). InnerVolumeSpecName "config".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.058811 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-scripts" (OuterVolumeSpecName: "scripts") pod "a1432e17-a4e2-4cde-a3d7-89eddf9973e1" (UID: "a1432e17-a4e2-4cde-a3d7-89eddf9973e1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.059718 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.056703 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "a1432e17-a4e2-4cde-a3d7-89eddf9973e1" (UID: "a1432e17-a4e2-4cde-a3d7-89eddf9973e1"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.061209 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.061241 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "a1432e17-a4e2-4cde-a3d7-89eddf9973e1" (UID: "a1432e17-a4e2-4cde-a3d7-89eddf9973e1"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.062514 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovs-vswitchd" containerID="cri-o://f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.107101 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-c008-account-create-update-cjb9h"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.120984 4861 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/neutron-7f868dbfb9-5bdsk" secret="" err="secret \"neutron-neutron-dockercfg-p72xd\" not found" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.134672 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-kube-api-access-nngmc" (OuterVolumeSpecName: "kube-api-access-nngmc") pod "a1432e17-a4e2-4cde-a3d7-89eddf9973e1" (UID: "a1432e17-a4e2-4cde-a3d7-89eddf9973e1"). InnerVolumeSpecName "kube-api-access-nngmc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.142192 4861 scope.go:117] "RemoveContainer" containerID="0a56cb413ff502a5c6367982e1ea0d6c8cc4097bc4d17ca704453e8ff1861c83" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.163778 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dr7zk\" (UniqueName: \"kubernetes.io/projected/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-kube-api-access-dr7zk\") pod \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.164513 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-combined-ca-bundle\") pod \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.164921 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdb-rundir\") pod \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.165018 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-config\") pod \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.165048 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.165104 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-metrics-certs-tls-certs\") pod \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.167041 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-scripts\") pod \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.167164 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdbserver-nb-tls-certs\") pod \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\" (UID: \"3f8ce486-c345-41aa-b641-b7c4ef27ecfe\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.167969 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.167989 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nngmc\" (UniqueName: 
\"kubernetes.io/projected/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-kube-api-access-nngmc\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.168001 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.169508 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-scripts" (OuterVolumeSpecName: "scripts") pod "3f8ce486-c345-41aa-b641-b7c4ef27ecfe" (UID: "3f8ce486-c345-41aa-b641-b7c4ef27ecfe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.170865 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "3f8ce486-c345-41aa-b641-b7c4ef27ecfe" (UID: "3f8ce486-c345-41aa-b641-b7c4ef27ecfe"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.170939 4861 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.171014 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data podName:5966cedc-8ab5-4390-906b-c5ac39333e09 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:36.17096787 +0000 UTC m=+1407.842462427 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data") pod "rabbitmq-server-0" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09") : configmap "rabbitmq-config-data" not found Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.181338 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-config" (OuterVolumeSpecName: "config") pod "3f8ce486-c345-41aa-b641-b7c4ef27ecfe" (UID: "3f8ce486-c345-41aa-b641-b7c4ef27ecfe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.181535 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "3f8ce486-c345-41aa-b641-b7c4ef27ecfe" (UID: "3f8ce486-c345-41aa-b641-b7c4ef27ecfe"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.181632 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-kube-api-access-dr7zk" (OuterVolumeSpecName: "kube-api-access-dr7zk") pod "3f8ce486-c345-41aa-b641-b7c4ef27ecfe" (UID: "3f8ce486-c345-41aa-b641-b7c4ef27ecfe"). InnerVolumeSpecName "kube-api-access-dr7zk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.185768 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:35 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: if [ -n "nova_cell0" ]; then Jan 29 06:58:35 crc kubenswrapper[4861]: GRANT_DATABASE="nova_cell0" Jan 29 06:58:35 crc kubenswrapper[4861]: else Jan 29 06:58:35 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:35 crc kubenswrapper[4861]: fi Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:35 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:35 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:35 crc kubenswrapper[4861]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:35 crc kubenswrapper[4861]: # support updates Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.187549 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell0-db-secret\\\" not found\"" pod="openstack/nova-cell0-1f01-account-create-update-874c5" podUID="b9c95c6e-e80b-4d39-8209-1dbd1c237351" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.188911 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="037a57cc-64cd-4b10-9c94-c609072db4f3" path="/var/lib/kubelet/pods/037a57cc-64cd-4b10-9c94-c609072db4f3/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.189484 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2285a712-569a-411b-99b9-73d10212c822" path="/var/lib/kubelet/pods/2285a712-569a-411b-99b9-73d10212c822/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.189998 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27c095ab-2da0-40f5-b361-e40819c7b3aa" path="/var/lib/kubelet/pods/27c095ab-2da0-40f5-b361-e40819c7b3aa/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.190522 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34ae275d-679d-40f9-883c-f72b76d821fe" path="/var/lib/kubelet/pods/34ae275d-679d-40f9-883c-f72b76d821fe/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.193610 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="505a3759-b9c9-48ed-b63c-cc0b1e253fe5" path="/var/lib/kubelet/pods/505a3759-b9c9-48ed-b63c-cc0b1e253fe5/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.194303 4861 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74" path="/var/lib/kubelet/pods/597c5a3a-501d-40ab-9e2c-0ed9fb3dcc74/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.194884 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e8fdf87-ff09-46c0-b508-f8f01e57290e" path="/var/lib/kubelet/pods/7e8fdf87-ff09-46c0-b508-f8f01e57290e/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.195961 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c6201c8-50a8-4b95-82e0-b944b78348d6" path="/var/lib/kubelet/pods/8c6201c8-50a8-4b95-82e0-b944b78348d6/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.197093 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0631735-7992-4ca9-8564-2fb8c223a266" path="/var/lib/kubelet/pods/a0631735-7992-4ca9-8564-2fb8c223a266/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.197616 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b743a02f-e9d8-4580-a3fa-230bbfbfea83" path="/var/lib/kubelet/pods/b743a02f-e9d8-4580-a3fa-230bbfbfea83/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.198944 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d200c5c2-f7a9-4db9-b65c-18658065131d" path="/var/lib/kubelet/pods/d200c5c2-f7a9-4db9-b65c-18658065131d/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.199535 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d30fad12-ed5e-4fbf-b668-28ef0e6c204c" path="/var/lib/kubelet/pods/d30fad12-ed5e-4fbf-b668-28ef0e6c204c/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.200044 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e04235da-615a-4098-9192-66537e3d2c8b" path="/var/lib/kubelet/pods/e04235da-615a-4098-9192-66537e3d2c8b/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.200787 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe" path="/var/lib/kubelet/pods/e7c9dac0-6ca3-46e6-bc1a-f76161fa6abe/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.201947 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9" path="/var/lib/kubelet/pods/f0bd3eaf-c6bd-4fa3-8a68-08334f7777b9/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.202486 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1c26938-2f5c-448b-8590-16ce29878d3b" path="/var/lib/kubelet/pods/f1c26938-2f5c-448b-8590-16ce29878d3b/volumes" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.210996 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-98fb-account-create-update-jv6v4"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.211054 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.211109 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-m775k"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.211125 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-m775k"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.211138 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.211334 4861 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerName="nova-api-log" containerID="cri-o://0376f68adc682a637da6dfee7b2c102f83dc2d0b2a50def1ae4ad71bb1486b5e" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.211545 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-log" containerID="cri-o://d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.211959 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerName="nova-api-api" containerID="cri-o://d3f897c89c2801586a375c91d0d6297c2d965784611ff0abf1834bdaf78b6197" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.212180 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-metadata" containerID="cri-o://114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.213307 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.215299 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.252888 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-c008-account-create-update-cjb9h"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.270227 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.270258 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.271336 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.271358 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.271369 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.271378 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dr7zk\" (UniqueName: \"kubernetes.io/projected/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-kube-api-access-dr7zk\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.271056 4861 secret.go:188] Couldn't get secret 
openstack/neutron-httpd-config: secret "neutron-httpd-config" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.271168 4861 secret.go:188] Couldn't get secret openstack/neutron-config: secret "neutron-config" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.271865 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config podName:ce4279f2-eded-42d5-9353-5235a6b7d64e nodeName:}" failed. No retries permitted until 2026-01-29 06:58:35.771844048 +0000 UTC m=+1407.443338605 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "httpd-config" (UniqueName: "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config") pod "neutron-7f868dbfb9-5bdsk" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e") : secret "neutron-httpd-config" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.271915 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config podName:ce4279f2-eded-42d5-9353-5235a6b7d64e nodeName:}" failed. No retries permitted until 2026-01-29 06:58:35.771880719 +0000 UTC m=+1407.443375276 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config") pod "neutron-7f868dbfb9-5bdsk" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e") : secret "neutron-config" not found Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.274595 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-h8qb2_a871f110-29fe-4e80-b339-5209aebc0652/openstack-network-exporter/0.log" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.274655 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.290349 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-9wjtc"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.300901 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-9wjtc"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.306743 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.309349 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f868dbfb9-5bdsk"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.318923 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-1f01-account-create-update-874c5"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.329135 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-9g56k"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.333767 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f8ce486-c345-41aa-b641-b7c4ef27ecfe" (UID: "3f8ce486-c345-41aa-b641-b7c4ef27ecfe"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.338179 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.341215 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-0391-account-create-update-jq5xd"] Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.342156 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-fpmbm], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/nova-cell1-0391-account-create-update-jq5xd" podUID="66f8ecd1-e1dd-4663-8681-59fe89a02691" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.345121 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a1432e17-a4e2-4cde-a3d7-89eddf9973e1" (UID: "a1432e17-a4e2-4cde-a3d7-89eddf9973e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.347039 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-9g56k"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.355367 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-g6s94"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.364254 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-g6s94"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.371223 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-733f-account-create-update-v8gq6"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.372267 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovs-rundir\") pod \"a871f110-29fe-4e80-b339-5209aebc0652\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.372350 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "a871f110-29fe-4e80-b339-5209aebc0652" (UID: "a871f110-29fe-4e80-b339-5209aebc0652"). InnerVolumeSpecName "ovs-rundir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.372434 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a871f110-29fe-4e80-b339-5209aebc0652-config\") pod \"a871f110-29fe-4e80-b339-5209aebc0652\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.372529 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bq2td\" (UniqueName: \"kubernetes.io/projected/a871f110-29fe-4e80-b339-5209aebc0652-kube-api-access-bq2td\") pod \"a871f110-29fe-4e80-b339-5209aebc0652\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.372599 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-combined-ca-bundle\") pod \"a871f110-29fe-4e80-b339-5209aebc0652\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.372633 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-metrics-certs-tls-certs\") pod \"a871f110-29fe-4e80-b339-5209aebc0652\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.372713 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovn-rundir\") pod \"a871f110-29fe-4e80-b339-5209aebc0652\" (UID: \"a871f110-29fe-4e80-b339-5209aebc0652\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.373316 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.373343 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.373355 4861 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovs-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.373367 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.373431 4861 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.373480 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data podName:3b8b1385-123a-4b60-af39-82d6492a65c2 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:37.373462463 +0000 UTC m=+1409.044957020 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data") pod "rabbitmq-cell1-server-0" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2") : configmap "rabbitmq-cell1-config-data" not found Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.373526 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a871f110-29fe-4e80-b339-5209aebc0652-config" (OuterVolumeSpecName: "config") pod "a871f110-29fe-4e80-b339-5209aebc0652" (UID: "a871f110-29fe-4e80-b339-5209aebc0652"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.373574 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "a871f110-29fe-4e80-b339-5209aebc0652" (UID: "a871f110-29fe-4e80-b339-5209aebc0652"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.378436 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a871f110-29fe-4e80-b339-5209aebc0652-kube-api-access-bq2td" (OuterVolumeSpecName: "kube-api-access-bq2td") pod "a871f110-29fe-4e80-b339-5209aebc0652" (UID: "a871f110-29fe-4e80-b339-5209aebc0652"). InnerVolumeSpecName "kube-api-access-bq2td". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.380656 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "3f8ce486-c345-41aa-b641-b7c4ef27ecfe" (UID: "3f8ce486-c345-41aa-b641-b7c4ef27ecfe"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.386752 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-r5hz4"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.395262 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "a1432e17-a4e2-4cde-a3d7-89eddf9973e1" (UID: "a1432e17-a4e2-4cde-a3d7-89eddf9973e1"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.396178 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-20e6-account-create-update-wrx86"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.405172 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-256m5"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.411677 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a871f110-29fe-4e80-b339-5209aebc0652" (UID: "a871f110-29fe-4e80-b339-5209aebc0652"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.412612 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-r5hz4"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.420448 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-256m5"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.426540 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-dfed-account-create-update-t7cnb"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.435051 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-733f-account-create-update-v8gq6"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.435578 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "a1432e17-a4e2-4cde-a3d7-89eddf9973e1" (UID: "a1432e17-a4e2-4cde-a3d7-89eddf9973e1"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.439660 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.439875 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="c4c159fd-7714-4351-8258-437e67ff5dbc" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.444706 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-6b8d96575c-7zzfv"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.445345 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-6b8d96575c-7zzfv" podUID="09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" containerName="barbican-worker-log" containerID="cri-o://75481cbc7bf2e4776643277e100d6f7fcc456f612bd0f4c451db4c8198750b42" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.445481 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-6b8d96575c-7zzfv" podUID="09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" containerName="barbican-worker" containerID="cri-o://12d75e57461b87b2ec9d6d00c1b304c2545872ff7dcee032f19e27ff512c2516" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.446263 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "3f8ce486-c345-41aa-b641-b7c4ef27ecfe" (UID: "3f8ce486-c345-41aa-b641-b7c4ef27ecfe"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.449983 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-d8b667488-v7lmh"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.450387 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-d8b667488-v7lmh" podUID="947c222c-8f0c-423f-84e8-75a4b9322829" containerName="barbican-api-log" containerID="cri-o://9a86c0075a1b39a77b81d03d33eaf3de19430f272ef69df32ef5227eb2cfdfbd" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.450737 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-d8b667488-v7lmh" podUID="947c222c-8f0c-423f-84e8-75a4b9322829" containerName="barbican-api" containerID="cri-o://a95782607794ffb075c46075902d651f0f6db3732fc62ef57c3c4e66ef00c4f4" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.455555 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475037 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-swift-storage-0\") pod \"86dd900f-d608-496c-91b3-be95d914cf58\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475160 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-svc\") pod \"86dd900f-d608-496c-91b3-be95d914cf58\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475196 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-nb\") pod \"86dd900f-d608-496c-91b3-be95d914cf58\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475216 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-sb\") pod \"86dd900f-d608-496c-91b3-be95d914cf58\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475328 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmqvx\" (UniqueName: \"kubernetes.io/projected/86dd900f-d608-496c-91b3-be95d914cf58-kube-api-access-lmqvx\") pod \"86dd900f-d608-496c-91b3-be95d914cf58\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475443 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-config\") pod \"86dd900f-d608-496c-91b3-be95d914cf58\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475799 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpmbm\" (UniqueName: \"kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm\") pod \"nova-cell1-0391-account-create-update-jq5xd\" (UID: 
\"66f8ecd1-e1dd-4663-8681-59fe89a02691\") " pod="openstack/nova-cell1-0391-account-create-update-jq5xd" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475867 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475878 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475887 4861 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/a871f110-29fe-4e80-b339-5209aebc0652-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475896 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475904 4861 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1432e17-a4e2-4cde-a3d7-89eddf9973e1-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475914 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a871f110-29fe-4e80-b339-5209aebc0652-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475923 4861 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f8ce486-c345-41aa-b641-b7c4ef27ecfe-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.475932 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bq2td\" (UniqueName: \"kubernetes.io/projected/a871f110-29fe-4e80-b339-5209aebc0652-kube-api-access-bq2td\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.483599 4861 projected.go:194] Error preparing data for projected volume kube-api-access-fpmbm for pod openstack/nova-cell1-0391-account-create-update-jq5xd: failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.483656 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm podName:66f8ecd1-e1dd-4663-8681-59fe89a02691 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:37.483640123 +0000 UTC m=+1409.155134680 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-fpmbm" (UniqueName: "kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm") pod "nova-cell1-0391-account-create-update-jq5xd" (UID: "66f8ecd1-e1dd-4663-8681-59fe89a02691") : failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.488223 4861 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.488290 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts podName:441b7714-dd72-448b-a5a8-b9f56057da43 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:36.488274433 +0000 UTC m=+1408.159768980 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts") pod "root-account-create-update-zh9b7" (UID: "441b7714-dd72-448b-a5a8-b9f56057da43") : configmap "openstack-cell1-scripts" not found Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.492222 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zh9b7"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.495310 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86dd900f-d608-496c-91b3-be95d914cf58-kube-api-access-lmqvx" (OuterVolumeSpecName: "kube-api-access-lmqvx") pod "86dd900f-d608-496c-91b3-be95d914cf58" (UID: "86dd900f-d608-496c-91b3-be95d914cf58"). InnerVolumeSpecName "kube-api-access-lmqvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.504189 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-c8ddb67f8-pqqd9"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.504401 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" podUID="bd9ed061-0329-42e0-8cca-e7b560c7a19c" containerName="barbican-keystone-listener-log" containerID="cri-o://e098f103ebd71969e2f8e2fd838a304a15e40397ed8a4eba94af458e0afc7a28" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.504804 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" podUID="bd9ed061-0329-42e0-8cca-e7b560c7a19c" containerName="barbican-keystone-listener" containerID="cri-o://c312a62932c0664c9e8bde4d291603923f2e2f621322670c3abe137b105616db" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.517111 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zrv47"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.523126 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zrv47"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.523283 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="3b8b1385-123a-4b60-af39-82d6492a65c2" containerName="rabbitmq" containerID="cri-o://df85d7b79b6e3d17ea7765b219c520d147b988eede5ce5119c6fe36e62177544" gracePeriod=604800 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.530878 4861 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="a0032cf8-4c45-4a4c-927d-686adba85ab1" containerName="galera" containerID="cri-o://9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.531279 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "a871f110-29fe-4e80-b339-5209aebc0652" (UID: "a871f110-29fe-4e80-b339-5209aebc0652"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.535643 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.535872 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="c749a121-7e8e-4d49-8a30-c27fa21926b5" containerName="nova-cell1-conductor-conductor" containerID="cri-o://928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.548615 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.549033 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="d72b59e5-64c2-4eab-955e-89d6298e834e" containerName="nova-cell0-conductor-conductor" containerID="cri-o://6756abe8a1b0340d3aa8c881cb242ac677f7ceda5ed061f5774b08d04e550a63" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.561187 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-nc5kl"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.570499 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-nc5kl"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.577859 4861 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a871f110-29fe-4e80-b339-5209aebc0652-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.577885 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmqvx\" (UniqueName: \"kubernetes.io/projected/86dd900f-d608-496c-91b3-be95d914cf58-kube-api-access-lmqvx\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.587793 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.589267 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.589535 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="a39b615c-006f-43b4-9f38-0fe1d0814696" containerName="nova-scheduler-scheduler" containerID="cri-o://10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149" gracePeriod=30 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.607446 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "86dd900f-d608-496c-91b3-be95d914cf58" (UID: "86dd900f-d608-496c-91b3-be95d914cf58"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.620271 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "86dd900f-d608-496c-91b3-be95d914cf58" (UID: "86dd900f-d608-496c-91b3-be95d914cf58"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.674657 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-1f01-account-create-update-874c5"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.681798 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "86dd900f-d608-496c-91b3-be95d914cf58" (UID: "86dd900f-d608-496c-91b3-be95d914cf58"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.682263 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "86dd900f-d608-496c-91b3-be95d914cf58" (UID: "86dd900f-d608-496c-91b3-be95d914cf58"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.682518 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmqzt\" (UniqueName: \"kubernetes.io/projected/4c1315db-486b-4b63-bdb0-630c247d49b4-kube-api-access-mmqzt\") pod \"4c1315db-486b-4b63-bdb0-630c247d49b4\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.684363 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-svc\") pod \"86dd900f-d608-496c-91b3-be95d914cf58\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.684411 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-sb\") pod \"86dd900f-d608-496c-91b3-be95d914cf58\" (UID: \"86dd900f-d608-496c-91b3-be95d914cf58\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.684447 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config-secret\") pod \"4c1315db-486b-4b63-bdb0-630c247d49b4\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.684491 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-combined-ca-bundle\") pod \"4c1315db-486b-4b63-bdb0-630c247d49b4\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.684528 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config\") pod \"4c1315db-486b-4b63-bdb0-630c247d49b4\" (UID: \"4c1315db-486b-4b63-bdb0-630c247d49b4\") " Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.685762 4861 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.685783 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: W0129 06:58:35.704123 4861 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/86dd900f-d608-496c-91b3-be95d914cf58/volumes/kubernetes.io~configmap/dns-svc Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.704182 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "86dd900f-d608-496c-91b3-be95d914cf58" (UID: "86dd900f-d608-496c-91b3-be95d914cf58"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: W0129 06:58:35.704309 4861 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/86dd900f-d608-496c-91b3-be95d914cf58/volumes/kubernetes.io~configmap/ovsdbserver-sb Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.704322 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "86dd900f-d608-496c-91b3-be95d914cf58" (UID: "86dd900f-d608-496c-91b3-be95d914cf58"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.709638 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-config" (OuterVolumeSpecName: "config") pod "86dd900f-d608-496c-91b3-be95d914cf58" (UID: "86dd900f-d608-496c-91b3-be95d914cf58"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.715771 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:35 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: if [ -n "barbican" ]; then Jan 29 06:58:35 crc kubenswrapper[4861]: GRANT_DATABASE="barbican" Jan 29 06:58:35 crc kubenswrapper[4861]: else Jan 29 06:58:35 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:35 crc kubenswrapper[4861]: fi Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:35 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:35 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:35 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:35 crc kubenswrapper[4861]: # support updates Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.716907 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"barbican-db-secret\\\" not found\"" pod="openstack/barbican-20e6-account-create-update-wrx86" podUID="9a38ce6f-f3ff-4976-8acb-9576d89df924" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.723788 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c1315db-486b-4b63-bdb0-630c247d49b4-kube-api-access-mmqzt" (OuterVolumeSpecName: "kube-api-access-mmqzt") pod "4c1315db-486b-4b63-bdb0-630c247d49b4" (UID: "4c1315db-486b-4b63-bdb0-630c247d49b4"). InnerVolumeSpecName "kube-api-access-mmqzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.744625 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:35 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: if [ -n "neutron" ]; then Jan 29 06:58:35 crc kubenswrapper[4861]: GRANT_DATABASE="neutron" Jan 29 06:58:35 crc kubenswrapper[4861]: else Jan 29 06:58:35 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:35 crc kubenswrapper[4861]: fi Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:35 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:35 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:35 crc kubenswrapper[4861]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:35 crc kubenswrapper[4861]: # support updates Jan 29 06:58:35 crc kubenswrapper[4861]: Jan 29 06:58:35 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.744714 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "4c1315db-486b-4b63-bdb0-630c247d49b4" (UID: "4c1315db-486b-4b63-bdb0-630c247d49b4"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.746441 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c1315db-486b-4b63-bdb0-630c247d49b4" (UID: "4c1315db-486b-4b63-bdb0-630c247d49b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.752608 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"neutron-db-secret\\\" not found\"" pod="openstack/neutron-dfed-account-create-update-t7cnb" podUID="46d0aaff-25ca-4605-9b55-ee1f5a897ff2" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.763992 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.787721 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmqzt\" (UniqueName: \"kubernetes.io/projected/4c1315db-486b-4b63-bdb0-630c247d49b4-kube-api-access-mmqzt\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.787918 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.787974 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.788082 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86dd900f-d608-496c-91b3-be95d914cf58-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.788137 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.788187 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.788321 4861 secret.go:188] Couldn't get secret openstack/neutron-httpd-config: secret "neutron-httpd-config" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.788417 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config podName:ce4279f2-eded-42d5-9353-5235a6b7d64e nodeName:}" failed. No retries permitted until 2026-01-29 06:58:36.788399256 +0000 UTC m=+1408.459893823 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "httpd-config" (UniqueName: "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config") pod "neutron-7f868dbfb9-5bdsk" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e") : secret "neutron-httpd-config" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.788417 4861 secret.go:188] Couldn't get secret openstack/neutron-config: secret "neutron-config" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.788544 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config podName:ce4279f2-eded-42d5-9353-5235a6b7d64e nodeName:}" failed. No retries permitted until 2026-01-29 06:58:36.788537399 +0000 UTC m=+1408.460031956 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config") pod "neutron-7f868dbfb9-5bdsk" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e") : secret "neutron-config" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.788348 4861 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.788654 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts podName:66f8ecd1-e1dd-4663-8681-59fe89a02691 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:37.788648052 +0000 UTC m=+1409.460142599 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts") pod "nova-cell1-0391-account-create-update-jq5xd" (UID: "66f8ecd1-e1dd-4663-8681-59fe89a02691") : configmap "openstack-cell1-scripts" not found Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.812582 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "4c1315db-486b-4b63-bdb0-630c247d49b4" (UID: "4c1315db-486b-4b63-bdb0-630c247d49b4"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.814012 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="5966cedc-8ab5-4390-906b-c5ac39333e09" containerName="rabbitmq" containerID="cri-o://b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777" gracePeriod=604800 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.818793 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-20e6-account-create-update-wrx86"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.845940 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-dfed-account-create-update-t7cnb"] Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.904318 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4c1315db-486b-4b63-bdb0-630c247d49b4-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.919637 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.957823 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.959522 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.959912 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.959976 4861 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server" Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.960243 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 
06:58:35.964676 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.978578 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:35 crc kubenswrapper[4861]: E0129 06:58:35.978645 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovs-vswitchd" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.990978 4861 generic.go:334] "Generic (PLEG): container finished" podID="5ce76094-c71f-46c7-a69d-7d30d8540c5a" containerID="1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483" exitCode=143 Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.991130 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5ce76094-c71f-46c7-a69d-7d30d8540c5a","Type":"ContainerDied","Data":"1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483"} Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.994759 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-h8qb2_a871f110-29fe-4e80-b339-5209aebc0652/openstack-network-exporter/0.log" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.994944 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-h8qb2" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.994948 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-h8qb2" event={"ID":"a871f110-29fe-4e80-b339-5209aebc0652","Type":"ContainerDied","Data":"5ad21a9e6631c281c20796d9b9df214a5eaa20b832d5832f99725eaa33aff7c0"} Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.995063 4861 scope.go:117] "RemoveContainer" containerID="96077c6516cf385f97eadb5e32fbb191b2f9052a56daddb824c1bc8fcead61a3" Jan 29 06:58:35 crc kubenswrapper[4861]: I0129 06:58:35.998660 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.000730 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" event={"ID":"86dd900f-d608-496c-91b3-be95d914cf58","Type":"ContainerDied","Data":"3e3440ea939dd2d2b29feb8067c587d14bde6fe199c73836f73fcee94a17f3a4"} Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.000864 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fcd6f8f8f-ktt88" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.022685 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-733f-account-create-update-v8gq6" event={"ID":"b7d75aa6-1d18-4901-874c-e6b9db142421","Type":"ContainerStarted","Data":"53a6286b6d02178a4f0d214f3a7fcc544ec3d450f22f31162083f4d0efba00bc"} Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.025722 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:36 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: if [ -n "nova_api" ]; then Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE="nova_api" Jan 29 06:58:36 crc kubenswrapper[4861]: else Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:36 crc kubenswrapper[4861]: fi Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:36 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:36 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:36 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:36 crc kubenswrapper[4861]: # support updates Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.026964 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-api-db-secret\\\" not found\"" pod="openstack/nova-api-733f-account-create-update-v8gq6" podUID="b7d75aa6-1d18-4901-874c-e6b9db142421" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.030496 4861 generic.go:334] "Generic (PLEG): container finished" podID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerID="0376f68adc682a637da6dfee7b2c102f83dc2d0b2a50def1ae4ad71bb1486b5e" exitCode=143 Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.030534 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e78be7f2-60d4-4f0e-a510-bf5e652110d1","Type":"ContainerDied","Data":"0376f68adc682a637da6dfee7b2c102f83dc2d0b2a50def1ae4ad71bb1486b5e"} Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.032650 4861 generic.go:334] "Generic (PLEG): container finished" podID="4b488de3-67a5-49cf-a61a-37a44acbbe19" containerID="403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b" exitCode=143 Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.032714 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6957679874-pnq22" event={"ID":"4b488de3-67a5-49cf-a61a-37a44acbbe19","Type":"ContainerDied","Data":"403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b"} Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.033958 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-20e6-account-create-update-wrx86" event={"ID":"9a38ce6f-f3ff-4976-8acb-9576d89df924","Type":"ContainerStarted","Data":"747a0b1ad7e7d75066d134854625ccebc73955b112d464496fc17e9fbd429840"} Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.035952 4861 generic.go:334] "Generic (PLEG): container finished" podID="4c1315db-486b-4b63-bdb0-630c247d49b4" containerID="46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b" exitCode=137 Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.036051 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.039209 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_3f8ce486-c345-41aa-b641-b7c4ef27ecfe/ovsdbserver-nb/0.log" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.039281 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"3f8ce486-c345-41aa-b641-b7c4ef27ecfe","Type":"ContainerDied","Data":"2d43122e89fc2f3b86af7df1e43cb42866a149cfc1bda0377adcec5c856c1fd4"} Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.041350 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1f01-account-create-update-874c5" event={"ID":"b9c95c6e-e80b-4d39-8209-1dbd1c237351","Type":"ContainerStarted","Data":"e010289644a0b068915e18a6424eccc7213da7b01216d521e37e07734771ba04"} Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.042976 4861 generic.go:334] "Generic (PLEG): container finished" podID="10b22efc-707a-4ffc-8edc-44c39900ba2b" containerID="f4608109881c8d879d0747a15002faa7be33fa42a0ab54b3b737788b5adb25d7" exitCode=143 Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.043021 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10b22efc-707a-4ffc-8edc-44c39900ba2b","Type":"ContainerDied","Data":"f4608109881c8d879d0747a15002faa7be33fa42a0ab54b3b737788b5adb25d7"} Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.057702 4861 generic.go:334] "Generic (PLEG): container finished" podID="947c222c-8f0c-423f-84e8-75a4b9322829" containerID="9a86c0075a1b39a77b81d03d33eaf3de19430f272ef69df32ef5227eb2cfdfbd" exitCode=143 Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.057802 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d8b667488-v7lmh" event={"ID":"947c222c-8f0c-423f-84e8-75a4b9322829","Type":"ContainerDied","Data":"9a86c0075a1b39a77b81d03d33eaf3de19430f272ef69df32ef5227eb2cfdfbd"} Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.059874 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.060153 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.066497 4861 generic.go:334] "Generic (PLEG): container finished" podID="633f63c1-539f-4477-8aae-d6731a514280" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" exitCode=0 Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.066621 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6n7w9" event={"ID":"633f63c1-539f-4477-8aae-d6731a514280","Type":"ContainerDied","Data":"df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52"} Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.071307 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dfed-account-create-update-t7cnb" event={"ID":"46d0aaff-25ca-4605-9b55-ee1f5a897ff2","Type":"ContainerStarted","Data":"4e904b1d6f6c67ab4c014f25208641b097dfff5acf1a24a287e5926eb5396c3d"} Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.078607 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:36 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: if [ -n "nova_cell0" ]; then Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE="nova_cell0" Jan 29 06:58:36 crc kubenswrapper[4861]: else Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:36 crc kubenswrapper[4861]: fi Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:36 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:36 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:36 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to
Jan 29 06:58:36 crc kubenswrapper[4861]: # support updates
Jan 29 06:58:36 crc kubenswrapper[4861]:
Jan 29 06:58:36 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError"
Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.079673 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell0-db-secret\\\" not found\"" pod="openstack/nova-cell0-1f01-account-create-update-874c5" podUID="b9c95c6e-e80b-4d39-8209-1dbd1c237351"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115758 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="849976197be27f3f0414f54d8c975813716a50cdd59b37975b4eb4bb0b453c69" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115809 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="80b76bbf5574a1ffe9a28896fadf09a48689fb5bb78991c8c124528c6850d0ee" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115816 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="300cdcb844a68c46fd719e6be6e862e7b417f885d1ab7289bf038801298b0951" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115823 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="e2a3b495086295e31b7ed56c3d2932e3f985fccd26d8e9e239e77653b59a0d32" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115829 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="7fed2197542cb4f3117973c4387005866a5b3aa792d7b6f414b399fca8226503" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115837 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="696299cd0fa4bb5069c2910a6be63baa743730b8326a70bb3ffd8aa9d1c825ec" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115864 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="3281266ddbd401b2f04a7cb7e231cd35c5bced4b7f65472c79c6cab82698c818" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115871 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="18d6ec1b3d371c36c925fb4104455a8183e0a1995e0abd435a9954ffab121835" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115877 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="42fe23b69a4684b68ede63233c8ea85578f5383ad1505896e099548f6e44a6ea" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115883 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="5f0359b2c69c9a01c0a74bbb8ecc34b7cb21acbd0a142f267a70aaf243d0d4d1" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115889 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="f247857b6eb8ade45650fa7fc5c2b6bff1ac506097b24f9f3cdf86be8a43d2d4" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115895 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="c2889e92275d93552a69c4569021d1f48b14b5ad80332e996fa65c8fc322719d" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115901 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="90931a6cfdb8a44357367186d2c4396fd4c9ac22d948ca358a02706b89784468" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115907 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="27fc60fdd9d503cf21c40b0704ab3f668d5965d81d74cbbc4c3aa6e2ce528d23" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115969 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"849976197be27f3f0414f54d8c975813716a50cdd59b37975b4eb4bb0b453c69"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.115994 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"80b76bbf5574a1ffe9a28896fadf09a48689fb5bb78991c8c124528c6850d0ee"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.116048 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"300cdcb844a68c46fd719e6be6e862e7b417f885d1ab7289bf038801298b0951"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.116062 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"e2a3b495086295e31b7ed56c3d2932e3f985fccd26d8e9e239e77653b59a0d32"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.119985 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"7fed2197542cb4f3117973c4387005866a5b3aa792d7b6f414b399fca8226503"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.120104 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"696299cd0fa4bb5069c2910a6be63baa743730b8326a70bb3ffd8aa9d1c825ec"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.120117 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"3281266ddbd401b2f04a7cb7e231cd35c5bced4b7f65472c79c6cab82698c818"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.120125 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"18d6ec1b3d371c36c925fb4104455a8183e0a1995e0abd435a9954ffab121835"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.120133 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"42fe23b69a4684b68ede63233c8ea85578f5383ad1505896e099548f6e44a6ea"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.120141 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"5f0359b2c69c9a01c0a74bbb8ecc34b7cb21acbd0a142f267a70aaf243d0d4d1"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.120150 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"f247857b6eb8ade45650fa7fc5c2b6bff1ac506097b24f9f3cdf86be8a43d2d4"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.120158 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"c2889e92275d93552a69c4569021d1f48b14b5ad80332e996fa65c8fc322719d"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.120168 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"90931a6cfdb8a44357367186d2c4396fd4c9ac22d948ca358a02706b89784468"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.120178 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"27fc60fdd9d503cf21c40b0704ab3f668d5965d81d74cbbc4c3aa6e2ce528d23"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.143041 4861 generic.go:334] "Generic (PLEG): container finished" podID="bd9ed061-0329-42e0-8cca-e7b560c7a19c" containerID="e098f103ebd71969e2f8e2fd838a304a15e40397ed8a4eba94af458e0afc7a28" exitCode=143
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.143178 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" event={"ID":"bd9ed061-0329-42e0-8cca-e7b560c7a19c","Type":"ContainerDied","Data":"e098f103ebd71969e2f8e2fd838a304a15e40397ed8a4eba94af458e0afc7a28"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.160815 4861 generic.go:334] "Generic (PLEG): container finished" podID="09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" containerID="75481cbc7bf2e4776643277e100d6f7fcc456f612bd0f4c451db4c8198750b42" exitCode=143
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.160939 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6b8d96575c-7zzfv" event={"ID":"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3","Type":"ContainerDied","Data":"75481cbc7bf2e4776643277e100d6f7fcc456f612bd0f4c451db4c8198750b42"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.167743 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-h8qb2"]
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.179621 4861 generic.go:334] "Generic (PLEG): container finished" podID="b1c31da3-c703-4d07-82e5-b02fe841a548" containerID="9fd75cdc74ab09059f227b09ddff0f8e7c83f3521d26bb3444075ede07ae852b" exitCode=0
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.179712 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b1c31da3-c703-4d07-82e5-b02fe841a548","Type":"ContainerDied","Data":"9fd75cdc74ab09059f227b09ddff0f8e7c83f3521d26bb3444075ede07ae852b"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.179801 4861 scope.go:117] "RemoveContainer" containerID="a8543a52cd59c6b1f2aa0887f12875b0d96e617f0a1ae249e3c7261009db9ece"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.182128 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-h8qb2"]
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.183328 4861 generic.go:334] "Generic (PLEG): container finished" podID="6c25bacb-4105-4fa4-a798-117f9cbe75fe" containerID="1040f23ce9435abf9d98fe86eda2bd1c172d7b64d769b973274d71099bd7ad84" exitCode=143
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.183406 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6c25bacb-4105-4fa4-a798-117f9cbe75fe","Type":"ContainerDied","Data":"1040f23ce9435abf9d98fe86eda2bd1c172d7b64d769b973274d71099bd7ad84"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.191481 4861 generic.go:334] "Generic (PLEG): container finished" podID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerID="d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea" exitCode=143
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.191895 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0391-account-create-update-jq5xd"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.192602 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed","Type":"ContainerDied","Data":"d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea"}
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.194743 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f868dbfb9-5bdsk" podUID="ce4279f2-eded-42d5-9353-5235a6b7d64e" containerName="neutron-api" containerID="cri-o://ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e" gracePeriod=30
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.194810 4861 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/root-account-create-update-zh9b7" secret="" err="secret \"galera-openstack-cell1-dockercfg-dt4gs\" not found"
pod="openstack/root-account-create-update-zh9b7" secret="" err="secret \"galera-openstack-cell1-dockercfg-dt4gs\" not found" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.194837 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f868dbfb9-5bdsk" podUID="ce4279f2-eded-42d5-9353-5235a6b7d64e" containerName="neutron-httpd" containerID="cri-o://fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2" gracePeriod=30 Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.195684 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:36 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: if [ -n "cinder" ]; then Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE="cinder" Jan 29 06:58:36 crc kubenswrapper[4861]: else Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:36 crc kubenswrapper[4861]: fi Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:36 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:36 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:36 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:36 crc kubenswrapper[4861]: # support updates Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.195969 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-58dc7dd48c-t4mkl"] Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.196172 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" podUID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" containerName="proxy-httpd" containerID="cri-o://6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd" gracePeriod=30 Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.196268 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" podUID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" containerName="proxy-server" containerID="cri-o://81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499" gracePeriod=30 Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.196812 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-3399-account-create-update-w9zr8" podUID="3949b327-31a1-4dfa-bc04-e13b6c033ecd" Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.196894 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:36 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: if [ -n "placement" ]; then Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE="placement" Jan 29 06:58:36 crc kubenswrapper[4861]: else Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:36 crc kubenswrapper[4861]: fi Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:36 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:36 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:36 crc kubenswrapper[4861]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:36 crc kubenswrapper[4861]: # support updates Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.201215 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"placement-db-secret\\\" not found\"" pod="openstack/placement-98fb-account-create-update-jv6v4" podUID="66a7721f-92e9-498c-97b8-cbe9890220d9" Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.201967 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:36 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: if [ -n "glance" ]; then Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE="glance" Jan 29 06:58:36 crc kubenswrapper[4861]: else Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:36 crc kubenswrapper[4861]: fi Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:36 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:36 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:36 crc kubenswrapper[4861]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:36 crc kubenswrapper[4861]: # support updates Jan 29 06:58:36 crc kubenswrapper[4861]: Jan 29 06:58:36 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.203502 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"glance-db-secret\\\" not found\"" pod="openstack/glance-c008-account-create-update-cjb9h" podUID="1f95675a-f692-4c29-90da-01eda11003ac" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.204519 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0391-account-create-update-jq5xd" Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.211995 4861 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.215451 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data podName:5966cedc-8ab5-4390-906b-c5ac39333e09 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:38.215432218 +0000 UTC m=+1409.886926785 (durationBeforeRetry 2s). 
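(The comment block in these mariadb-account-create-update dumps describes a portability pattern: create the account with a plain CREATE USER, then apply password and TLS settings with ALTER USER so a re-run of the job updates an existing account, and only then GRANT. The SQL body itself is truncated out of the captured errors -- everything between "$MYSQL_CMD <" and the closing "logger=\"UnhandledError\"" is lost -- so the following is only a minimal sketch of that pattern, with a hypothetical ${DatabaseUser} standing in for the templated account name, not the operator's actual script:

    # hypothetical reconstruction of the pattern the comments describe
    $MYSQL_CMD <<EOF
    -- CREATE USER IF NOT EXISTS works on both MariaDB and MySQL 8
    -- (no implicit creation via GRANT, no MariaDB-only CREATE OR REPLACE)
    CREATE USER IF NOT EXISTS '${DatabaseUser}'@'%';
    -- password and TLS requirements go through ALTER so re-runs update them
    ALTER USER '${DatabaseUser}'@'%' IDENTIFIED BY '${DatabasePassword}' REQUIRE NONE;
    GRANT ALL PRIVILEGES ON ${GRANT_DATABASE}.* TO '${DatabaseUser}'@'%';
    EOF
)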
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.215045 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-ktt88"]
Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.203721 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 29 06:58:36 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 29 06:58:36 crc kubenswrapper[4861]:
Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 29 06:58:36 crc kubenswrapper[4861]:
Jan 29 06:58:36 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 29 06:58:36 crc kubenswrapper[4861]:
Jan 29 06:58:36 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 29 06:58:36 crc kubenswrapper[4861]:
Jan 29 06:58:36 crc kubenswrapper[4861]: if [ -n "" ]; then
Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE=""
Jan 29 06:58:36 crc kubenswrapper[4861]: else
Jan 29 06:58:36 crc kubenswrapper[4861]: GRANT_DATABASE="*"
Jan 29 06:58:36 crc kubenswrapper[4861]: fi
Jan 29 06:58:36 crc kubenswrapper[4861]:
Jan 29 06:58:36 crc kubenswrapper[4861]: # going for maximum compatibility here:
Jan 29 06:58:36 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 29 06:58:36 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 29 06:58:36 crc kubenswrapper[4861]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 29 06:58:36 crc kubenswrapper[4861]: # support updates
Jan 29 06:58:36 crc kubenswrapper[4861]:
Jan 29 06:58:36 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError"
Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.219703 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-zh9b7" podUID="441b7714-dd72-448b-a5a8-b9f56057da43"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.226143 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-ktt88"]
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.234379 4861 scope.go:117] "RemoveContainer" containerID="7f360a109c4ea9a81863b2378bb3244474ecea22c079e1bd431ea1270e44d30d"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.253693 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.257457 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="3b8b1385-123a-4b60-af39-82d6492a65c2" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.279906 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.303235 4861 scope.go:117] "RemoveContainer" containerID="46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.313689 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts\") pod \"66f8ecd1-e1dd-4663-8681-59fe89a02691\" (UID: \"66f8ecd1-e1dd-4663-8681-59fe89a02691\") "
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.317657 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "66f8ecd1-e1dd-4663-8681-59fe89a02691" (UID: "66f8ecd1-e1dd-4663-8681-59fe89a02691"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.318807 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.320448 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.418668 4861 scope.go:117] "RemoveContainer" containerID="46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.418894 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f8ecd1-e1dd-4663-8681-59fe89a02691-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.424616 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b\": container with ID starting with 46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b not found: ID does not exist" containerID="46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.424665 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b"} err="failed to get container status \"46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b\": rpc error: code = NotFound desc = could not find container \"46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b\": container with ID starting with 46947e6a367abb60467e69396f925eb8cfd9544c8350f95e3142b3cd3cddbd8b not found: ID does not exist"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.424695 4861 scope.go:117] "RemoveContainer" containerID="162f0b5fe401e18a951e9523bbee70e5964f24419ec2081475b2ab90051cf4b6"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.506755 4861 scope.go:117] "RemoveContainer" containerID="99b14fb198d584fcdb36a249a3809f469be4cd3e4644d253366d9f932150b9eb"
Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.521623 4861 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found
Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.521688 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts podName:441b7714-dd72-448b-a5a8-b9f56057da43 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:38.521669707 +0000 UTC m=+1410.193164264 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts") pod "root-account-create-update-zh9b7" (UID: "441b7714-dd72-448b-a5a8-b9f56057da43") : configmap "openstack-cell1-scripts" not found
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.733625 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wczkh"]
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.752142 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.762089 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-20e6-account-create-update-wrx86"
Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.780021 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dfed-account-create-update-t7cnb"
Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.831563 4861 secret.go:188] Couldn't get secret openstack/neutron-httpd-config: secret "neutron-httpd-config" not found
Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.831623 4861 secret.go:188] Couldn't get secret openstack/neutron-config: secret "neutron-config" not found
Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.831639 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config podName:ce4279f2-eded-42d5-9353-5235a6b7d64e nodeName:}" failed. No retries permitted until 2026-01-29 06:58:38.831622493 +0000 UTC m=+1410.503117050 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "httpd-config" (UniqueName: "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config") pod "neutron-7f868dbfb9-5bdsk" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e") : secret "neutron-httpd-config" not found
Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.831682 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config podName:ce4279f2-eded-42d5-9353-5235a6b7d64e nodeName:}" failed. No retries permitted until 2026-01-29 06:58:38.831666324 +0000 UTC m=+1410.503160871 (durationBeforeRetry 2s).
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config") pod "neutron-7f868dbfb9-5bdsk" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e") : secret "neutron-config" not found Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.897955 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.901300 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.904043 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 06:58:36 crc kubenswrapper[4861]: E0129 06:58:36.904093 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="c749a121-7e8e-4d49-8a30-c27fa21926b5" containerName="nova-cell1-conductor-conductor" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.932979 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fll29\" (UniqueName: \"kubernetes.io/projected/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-kube-api-access-fll29\") pod \"46d0aaff-25ca-4605-9b55-ee1f5a897ff2\" (UID: \"46d0aaff-25ca-4605-9b55-ee1f5a897ff2\") " Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.933035 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a38ce6f-f3ff-4976-8acb-9576d89df924-operator-scripts\") pod \"9a38ce6f-f3ff-4976-8acb-9576d89df924\" (UID: \"9a38ce6f-f3ff-4976-8acb-9576d89df924\") " Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.933054 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-combined-ca-bundle\") pod \"c4c159fd-7714-4351-8258-437e67ff5dbc\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.933115 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2dg4\" (UniqueName: \"kubernetes.io/projected/c4c159fd-7714-4351-8258-437e67ff5dbc-kube-api-access-p2dg4\") pod \"c4c159fd-7714-4351-8258-437e67ff5dbc\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.933309 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-config-data\") pod 
\"c4c159fd-7714-4351-8258-437e67ff5dbc\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.933331 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-operator-scripts\") pod \"46d0aaff-25ca-4605-9b55-ee1f5a897ff2\" (UID: \"46d0aaff-25ca-4605-9b55-ee1f5a897ff2\") " Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.933599 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-nova-novncproxy-tls-certs\") pod \"c4c159fd-7714-4351-8258-437e67ff5dbc\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.933755 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmgqg\" (UniqueName: \"kubernetes.io/projected/9a38ce6f-f3ff-4976-8acb-9576d89df924-kube-api-access-gmgqg\") pod \"9a38ce6f-f3ff-4976-8acb-9576d89df924\" (UID: \"9a38ce6f-f3ff-4976-8acb-9576d89df924\") " Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.934015 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-vencrypt-tls-certs\") pod \"c4c159fd-7714-4351-8258-437e67ff5dbc\" (UID: \"c4c159fd-7714-4351-8258-437e67ff5dbc\") " Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.934460 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a38ce6f-f3ff-4976-8acb-9576d89df924-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9a38ce6f-f3ff-4976-8acb-9576d89df924" (UID: "9a38ce6f-f3ff-4976-8acb-9576d89df924"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.934586 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "46d0aaff-25ca-4605-9b55-ee1f5a897ff2" (UID: "46d0aaff-25ca-4605-9b55-ee1f5a897ff2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.935745 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a38ce6f-f3ff-4976-8acb-9576d89df924-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.935769 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.948922 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4c159fd-7714-4351-8258-437e67ff5dbc-kube-api-access-p2dg4" (OuterVolumeSpecName: "kube-api-access-p2dg4") pod "c4c159fd-7714-4351-8258-437e67ff5dbc" (UID: "c4c159fd-7714-4351-8258-437e67ff5dbc"). InnerVolumeSpecName "kube-api-access-p2dg4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.951520 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a38ce6f-f3ff-4976-8acb-9576d89df924-kube-api-access-gmgqg" (OuterVolumeSpecName: "kube-api-access-gmgqg") pod "9a38ce6f-f3ff-4976-8acb-9576d89df924" (UID: "9a38ce6f-f3ff-4976-8acb-9576d89df924"). InnerVolumeSpecName "kube-api-access-gmgqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:36 crc kubenswrapper[4861]: I0129 06:58:36.962702 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-kube-api-access-fll29" (OuterVolumeSpecName: "kube-api-access-fll29") pod "46d0aaff-25ca-4605-9b55-ee1f5a897ff2" (UID: "46d0aaff-25ca-4605-9b55-ee1f5a897ff2"). InnerVolumeSpecName "kube-api-access-fll29". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.002711 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-config-data" (OuterVolumeSpecName: "config-data") pod "c4c159fd-7714-4351-8258-437e67ff5dbc" (UID: "c4c159fd-7714-4351-8258-437e67ff5dbc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.002800 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c4c159fd-7714-4351-8258-437e67ff5dbc" (UID: "c4c159fd-7714-4351-8258-437e67ff5dbc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.022359 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.035510 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "c4c159fd-7714-4351-8258-437e67ff5dbc" (UID: "c4c159fd-7714-4351-8258-437e67ff5dbc"). InnerVolumeSpecName "vencrypt-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.039662 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmgqg\" (UniqueName: \"kubernetes.io/projected/9a38ce6f-f3ff-4976-8acb-9576d89df924-kube-api-access-gmgqg\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.039699 4861 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.039713 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fll29\" (UniqueName: \"kubernetes.io/projected/46d0aaff-25ca-4605-9b55-ee1f5a897ff2-kube-api-access-fll29\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.039724 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.039735 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2dg4\" (UniqueName: \"kubernetes.io/projected/c4c159fd-7714-4351-8258-437e67ff5dbc-kube-api-access-p2dg4\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.039746 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.065899 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "c4c159fd-7714-4351-8258-437e67ff5dbc" (UID: "c4c159fd-7714-4351-8258-437e67ff5dbc"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.086516 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.129023 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="081e4fcd-2dd7-4e2b-b276-28a0ec4f7110" path="/var/lib/kubelet/pods/081e4fcd-2dd7-4e2b-b276-28a0ec4f7110/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.130290 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ed744f8-d389-4054-8bce-7a1f7b1be71f" path="/var/lib/kubelet/pods/1ed744f8-d389-4054-8bce-7a1f7b1be71f/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.130784 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25104968-4e44-41eb-be19-a88e17384e57" path="/var/lib/kubelet/pods/25104968-4e44-41eb-be19-a88e17384e57/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.133672 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f8ce486-c345-41aa-b641-b7c4ef27ecfe" path="/var/lib/kubelet/pods/3f8ce486-c345-41aa-b641-b7c4ef27ecfe/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.134244 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c1315db-486b-4b63-bdb0-630c247d49b4" path="/var/lib/kubelet/pods/4c1315db-486b-4b63-bdb0-630c247d49b4/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.134692 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86dd900f-d608-496c-91b3-be95d914cf58" path="/var/lib/kubelet/pods/86dd900f-d608-496c-91b3-be95d914cf58/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.135654 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ee543aa-5e49-42f8-85c3-50911ec59b03" path="/var/lib/kubelet/pods/8ee543aa-5e49-42f8-85c3-50911ec59b03/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.136136 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d56467d-6815-4f11-9b52-06cd32a818ab" path="/var/lib/kubelet/pods/9d56467d-6815-4f11-9b52-06cd32a818ab/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.136689 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" path="/var/lib/kubelet/pods/a1432e17-a4e2-4cde-a3d7-89eddf9973e1/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.137671 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a871f110-29fe-4e80-b339-5209aebc0652" path="/var/lib/kubelet/pods/a871f110-29fe-4e80-b339-5209aebc0652/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.139475 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba" path="/var/lib/kubelet/pods/b1ee1ab9-e4b7-47d9-ad86-c05c7c1494ba/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.139984 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4785f86-012c-4e4f-98aa-0e4334a923bc" path="/var/lib/kubelet/pods/d4785f86-012c-4e4f-98aa-0e4334a923bc/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.140868 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-kolla-config\") pod \"a0032cf8-4c45-4a4c-927d-686adba85ab1\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.140952 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-galera-tls-certs\") pod \"a0032cf8-4c45-4a4c-927d-686adba85ab1\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.141004 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-generated\") pod \"a0032cf8-4c45-4a4c-927d-686adba85ab1\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.141055 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cf4z\" (UniqueName: \"kubernetes.io/projected/a0032cf8-4c45-4a4c-927d-686adba85ab1-kube-api-access-5cf4z\") pod \"a0032cf8-4c45-4a4c-927d-686adba85ab1\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.141159 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"a0032cf8-4c45-4a4c-927d-686adba85ab1\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.141242 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-default\") pod \"a0032cf8-4c45-4a4c-927d-686adba85ab1\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.141366 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-combined-ca-bundle\") pod \"a0032cf8-4c45-4a4c-927d-686adba85ab1\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.141399 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-operator-scripts\") pod \"a0032cf8-4c45-4a4c-927d-686adba85ab1\" (UID: \"a0032cf8-4c45-4a4c-927d-686adba85ab1\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.141546 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "a0032cf8-4c45-4a4c-927d-686adba85ab1" (UID: "a0032cf8-4c45-4a4c-927d-686adba85ab1"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.141860 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "a0032cf8-4c45-4a4c-927d-686adba85ab1" (UID: "a0032cf8-4c45-4a4c-927d-686adba85ab1"). InnerVolumeSpecName "config-data-generated". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.142018 4861 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.142036 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.142053 4861 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c159fd-7714-4351-8258-437e67ff5dbc-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.142330 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd03c3d0-dbd6-487e-90c9-9b58458a7cf2" path="/var/lib/kubelet/pods/fd03c3d0-dbd6-487e-90c9-9b58458a7cf2/volumes" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.142596 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "a0032cf8-4c45-4a4c-927d-686adba85ab1" (UID: "a0032cf8-4c45-4a4c-927d-686adba85ab1"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.147616 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a0032cf8-4c45-4a4c-927d-686adba85ab1" (UID: "a0032cf8-4c45-4a4c-927d-686adba85ab1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.152792 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="5966cedc-8ab5-4390-906b-c5ac39333e09" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.152983 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0032cf8-4c45-4a4c-927d-686adba85ab1-kube-api-access-5cf4z" (OuterVolumeSpecName: "kube-api-access-5cf4z") pod "a0032cf8-4c45-4a4c-927d-686adba85ab1" (UID: "a0032cf8-4c45-4a4c-927d-686adba85ab1"). InnerVolumeSpecName "kube-api-access-5cf4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.173452 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "mysql-db") pod "a0032cf8-4c45-4a4c-927d-686adba85ab1" (UID: "a0032cf8-4c45-4a4c-927d-686adba85ab1"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.175524 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.213783 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a0032cf8-4c45-4a4c-927d-686adba85ab1" (UID: "a0032cf8-4c45-4a4c-927d-686adba85ab1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.239675 4861 generic.go:334] "Generic (PLEG): container finished" podID="a39b615c-006f-43b4-9f38-0fe1d0814696" containerID="10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149" exitCode=0 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.239813 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.239825 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a39b615c-006f-43b4-9f38-0fe1d0814696","Type":"ContainerDied","Data":"10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.239872 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a39b615c-006f-43b4-9f38-0fe1d0814696","Type":"ContainerDied","Data":"dc165dd5df1f6d54ffd77bbe445c5b305514d69159bdd24d25e717fe0c7f5126"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.239899 4861 scope.go:117] "RemoveContainer" containerID="10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.243542 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xvjjs\" (UniqueName: \"kubernetes.io/projected/a39b615c-006f-43b4-9f38-0fe1d0814696-kube-api-access-xvjjs\") pod \"a39b615c-006f-43b4-9f38-0fe1d0814696\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.243588 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-config-data\") pod \"a39b615c-006f-43b4-9f38-0fe1d0814696\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.243782 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-combined-ca-bundle\") pod \"a39b615c-006f-43b4-9f38-0fe1d0814696\" (UID: \"a39b615c-006f-43b4-9f38-0fe1d0814696\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.244712 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cf4z\" (UniqueName: \"kubernetes.io/projected/a0032cf8-4c45-4a4c-927d-686adba85ab1-kube-api-access-5cf4z\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.244743 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.244753 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.244762 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.244774 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0032cf8-4c45-4a4c-927d-686adba85ab1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.250661 4861 generic.go:334] "Generic (PLEG): container finished" podID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" containerID="81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499" exitCode=0 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.250711 4861 generic.go:334] "Generic (PLEG): container finished" podID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" containerID="6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd" exitCode=0 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.250791 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" event={"ID":"1aa0d0b6-7731-421f-ac34-43cfd70e808c","Type":"ContainerDied","Data":"81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.250820 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" event={"ID":"1aa0d0b6-7731-421f-ac34-43cfd70e808c","Type":"ContainerDied","Data":"6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.250833 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" event={"ID":"1aa0d0b6-7731-421f-ac34-43cfd70e808c","Type":"ContainerDied","Data":"bd42d5dd4244b8ae1a37a2d77ea5c3e74a44a32ed9d07c672f19264bf349ac12"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.250860 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-58dc7dd48c-t4mkl" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.251694 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a39b615c-006f-43b4-9f38-0fe1d0814696-kube-api-access-xvjjs" (OuterVolumeSpecName: "kube-api-access-xvjjs") pod "a39b615c-006f-43b4-9f38-0fe1d0814696" (UID: "a39b615c-006f-43b4-9f38-0fe1d0814696"). InnerVolumeSpecName "kube-api-access-xvjjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.274674 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "a0032cf8-4c45-4a4c-927d-686adba85ab1" (UID: "a0032cf8-4c45-4a4c-927d-686adba85ab1"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.275050 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.277830 4861 generic.go:334] "Generic (PLEG): container finished" podID="a0032cf8-4c45-4a4c-927d-686adba85ab1" containerID="9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3" exitCode=0 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.277897 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a0032cf8-4c45-4a4c-927d-686adba85ab1","Type":"ContainerDied","Data":"9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.277923 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a0032cf8-4c45-4a4c-927d-686adba85ab1","Type":"ContainerDied","Data":"d5fb90236084102c2f246f84c52584717d4f7ade6757e8ede8b718d527e39130"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.278018 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.286293 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-config-data" (OuterVolumeSpecName: "config-data") pod "a39b615c-006f-43b4-9f38-0fe1d0814696" (UID: "a39b615c-006f-43b4-9f38-0fe1d0814696"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.315914 4861 generic.go:334] "Generic (PLEG): container finished" podID="ce4279f2-eded-42d5-9353-5235a6b7d64e" containerID="fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2" exitCode=0 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.316003 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f868dbfb9-5bdsk" event={"ID":"ce4279f2-eded-42d5-9353-5235a6b7d64e","Type":"ContainerDied","Data":"fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.317103 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-20e6-account-create-update-wrx86" event={"ID":"9a38ce6f-f3ff-4976-8acb-9576d89df924","Type":"ContainerDied","Data":"747a0b1ad7e7d75066d134854625ccebc73955b112d464496fc17e9fbd429840"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.317166 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-20e6-account-create-update-wrx86" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.328519 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dfed-account-create-update-t7cnb" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.328545 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dfed-account-create-update-t7cnb" event={"ID":"46d0aaff-25ca-4605-9b55-ee1f5a897ff2","Type":"ContainerDied","Data":"4e904b1d6f6c67ab4c014f25208641b097dfff5acf1a24a287e5926eb5396c3d"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.344130 4861 scope.go:117] "RemoveContainer" containerID="10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.345375 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149\": container with ID starting with 10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149 not found: ID does not exist" containerID="10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.345406 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149"} err="failed to get container status \"10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149\": rpc error: code = NotFound desc = could not find container \"10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149\": container with ID starting with 10ed8e268bfa3d7cff65a3ffcc90b999ebf79b6533bec4ad00e33bacb469a149 not found: ID does not exist" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.345423 4861 scope.go:117] "RemoveContainer" containerID="81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.346299 4861 generic.go:334] "Generic (PLEG): container finished" podID="c4c159fd-7714-4351-8258-437e67ff5dbc" containerID="004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95" exitCode=0 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.346585 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a39b615c-006f-43b4-9f38-0fe1d0814696" (UID: "a39b615c-006f-43b4-9f38-0fe1d0814696"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.346698 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-internal-tls-certs\") pod \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.346736 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-combined-ca-bundle\") pod \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.346837 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-log-httpd\") pod \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.346915 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2x47v\" (UniqueName: \"kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-kube-api-access-2x47v\") pod \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.346924 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0391-account-create-update-jq5xd" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.346958 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-run-httpd\") pod \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.347534 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.347629 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-public-tls-certs\") pod \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.347709 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-etc-swift\") pod \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.347746 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-config-data\") pod \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\" (UID: \"1aa0d0b6-7731-421f-ac34-43cfd70e808c\") " Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.347765 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wczkh" podUID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerName="registry-server" containerID="cri-o://380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9" gracePeriod=2 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.347852 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c4c159fd-7714-4351-8258-437e67ff5dbc","Type":"ContainerDied","Data":"004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.347883 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c4c159fd-7714-4351-8258-437e67ff5dbc","Type":"ContainerDied","Data":"e96e540b995c3e31ec41237d45d3ac474c5a0652535800c89d9c23913516e2c2"} Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.355339 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xvjjs\" (UniqueName: \"kubernetes.io/projected/a39b615c-006f-43b4-9f38-0fe1d0814696-kube-api-access-xvjjs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.355371 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.355386 4861 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0032cf8-4c45-4a4c-927d-686adba85ab1-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.355399 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.355412 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a39b615c-006f-43b4-9f38-0fe1d0814696-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.356678 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1aa0d0b6-7731-421f-ac34-43cfd70e808c" (UID: "1aa0d0b6-7731-421f-ac34-43cfd70e808c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.358292 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1aa0d0b6-7731-421f-ac34-43cfd70e808c" (UID: "1aa0d0b6-7731-421f-ac34-43cfd70e808c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.375259 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "1aa0d0b6-7731-421f-ac34-43cfd70e808c" (UID: "1aa0d0b6-7731-421f-ac34-43cfd70e808c"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.375265 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-kube-api-access-2x47v" (OuterVolumeSpecName: "kube-api-access-2x47v") pod "1aa0d0b6-7731-421f-ac34-43cfd70e808c" (UID: "1aa0d0b6-7731-421f-ac34-43cfd70e808c"). InnerVolumeSpecName "kube-api-access-2x47v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.459676 4861 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.459744 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data podName:3b8b1385-123a-4b60-af39-82d6492a65c2 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:41.459727984 +0000 UTC m=+1413.131222541 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data") pod "rabbitmq-cell1-server-0" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2") : configmap "rabbitmq-cell1-config-data" not found Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.462977 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.463003 4861 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.463011 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1aa0d0b6-7731-421f-ac34-43cfd70e808c-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.463021 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2x47v\" (UniqueName: \"kubernetes.io/projected/1aa0d0b6-7731-421f-ac34-43cfd70e808c-kube-api-access-2x47v\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.518296 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-config-data" (OuterVolumeSpecName: "config-data") pod "1aa0d0b6-7731-421f-ac34-43cfd70e808c" (UID: "1aa0d0b6-7731-421f-ac34-43cfd70e808c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.528671 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1aa0d0b6-7731-421f-ac34-43cfd70e808c" (UID: "1aa0d0b6-7731-421f-ac34-43cfd70e808c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.531572 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "1aa0d0b6-7731-421f-ac34-43cfd70e808c" (UID: "1aa0d0b6-7731-421f-ac34-43cfd70e808c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.533869 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1aa0d0b6-7731-421f-ac34-43cfd70e808c" (UID: "1aa0d0b6-7731-421f-ac34-43cfd70e808c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547115 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-5cwgv"] Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547450 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0032cf8-4c45-4a4c-927d-686adba85ab1" containerName="galera" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547462 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0032cf8-4c45-4a4c-927d-686adba85ab1" containerName="galera" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547471 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f8ce486-c345-41aa-b641-b7c4ef27ecfe" containerName="ovsdbserver-nb" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547476 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f8ce486-c345-41aa-b641-b7c4ef27ecfe" containerName="ovsdbserver-nb" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547488 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86dd900f-d608-496c-91b3-be95d914cf58" containerName="dnsmasq-dns" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547494 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="86dd900f-d608-496c-91b3-be95d914cf58" containerName="dnsmasq-dns" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547501 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4c159fd-7714-4351-8258-437e67ff5dbc" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547507 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4c159fd-7714-4351-8258-437e67ff5dbc" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547519 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" containerName="ovsdbserver-sb" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547524 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" containerName="ovsdbserver-sb" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547532 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f8ce486-c345-41aa-b641-b7c4ef27ecfe" containerName="openstack-network-exporter" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547538 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f8ce486-c345-41aa-b641-b7c4ef27ecfe" containerName="openstack-network-exporter" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547546 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86dd900f-d608-496c-91b3-be95d914cf58" containerName="init" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547551 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="86dd900f-d608-496c-91b3-be95d914cf58" containerName="init" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547562 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a871f110-29fe-4e80-b339-5209aebc0652" containerName="openstack-network-exporter" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547567 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a871f110-29fe-4e80-b339-5209aebc0652" containerName="openstack-network-exporter" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547580 4861 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="a39b615c-006f-43b4-9f38-0fe1d0814696" containerName="nova-scheduler-scheduler" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547590 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a39b615c-006f-43b4-9f38-0fe1d0814696" containerName="nova-scheduler-scheduler" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547605 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" containerName="proxy-server" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547611 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" containerName="proxy-server" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547634 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" containerName="openstack-network-exporter" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547640 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" containerName="openstack-network-exporter" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547649 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0032cf8-4c45-4a4c-927d-686adba85ab1" containerName="mysql-bootstrap" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547654 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0032cf8-4c45-4a4c-927d-686adba85ab1" containerName="mysql-bootstrap" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.547664 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" containerName="proxy-httpd" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547669 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" containerName="proxy-httpd" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547826 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" containerName="openstack-network-exporter" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547837 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" containerName="proxy-server" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547848 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0032cf8-4c45-4a4c-927d-686adba85ab1" containerName="galera" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547856 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="86dd900f-d608-496c-91b3-be95d914cf58" containerName="dnsmasq-dns" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547869 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a871f110-29fe-4e80-b339-5209aebc0652" containerName="openstack-network-exporter" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547878 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4c159fd-7714-4351-8258-437e67ff5dbc" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547886 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" containerName="proxy-httpd" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547898 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a39b615c-006f-43b4-9f38-0fe1d0814696" containerName="nova-scheduler-scheduler" Jan 29 06:58:37 
crc kubenswrapper[4861]: I0129 06:58:37.547909 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f8ce486-c345-41aa-b641-b7c4ef27ecfe" containerName="ovsdbserver-nb" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547920 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1432e17-a4e2-4cde-a3d7-89eddf9973e1" containerName="ovsdbserver-sb" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.547935 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f8ce486-c345-41aa-b641-b7c4ef27ecfe" containerName="openstack-network-exporter" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.548462 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-5cwgv" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.552996 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.559110 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-5cwgv"] Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.568302 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpmbm\" (UniqueName: \"kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm\") pod \"nova-cell1-0391-account-create-update-jq5xd\" (UID: \"66f8ecd1-e1dd-4663-8681-59fe89a02691\") " pod="openstack/nova-cell1-0391-account-create-update-jq5xd" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.570962 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.571015 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.571026 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.571065 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa0d0b6-7731-421f-ac34-43cfd70e808c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.573570 4861 projected.go:194] Error preparing data for projected volume kube-api-access-fpmbm for pod openstack/nova-cell1-0391-account-create-update-jq5xd: failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Jan 29 06:58:37 crc kubenswrapper[4861]: E0129 06:58:37.573646 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm podName:66f8ecd1-e1dd-4663-8681-59fe89a02691 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:41.573625252 +0000 UTC m=+1413.245119899 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-fpmbm" (UniqueName: "kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm") pod "nova-cell1-0391-account-create-update-jq5xd" (UID: "66f8ecd1-e1dd-4663-8681-59fe89a02691") : failed to fetch token: serviceaccounts "galera-openstack-cell1" not found Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.673754 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzdk9\" (UniqueName: \"kubernetes.io/projected/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-kube-api-access-dzdk9\") pod \"root-account-create-update-5cwgv\" (UID: \"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7\") " pod="openstack/root-account-create-update-5cwgv" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.673803 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts\") pod \"root-account-create-update-5cwgv\" (UID: \"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7\") " pod="openstack/root-account-create-update-5cwgv" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.775278 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzdk9\" (UniqueName: \"kubernetes.io/projected/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-kube-api-access-dzdk9\") pod \"root-account-create-update-5cwgv\" (UID: \"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7\") " pod="openstack/root-account-create-update-5cwgv" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.775333 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts\") pod \"root-account-create-update-5cwgv\" (UID: \"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7\") " pod="openstack/root-account-create-update-5cwgv" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.776199 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts\") pod \"root-account-create-update-5cwgv\" (UID: \"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7\") " pod="openstack/root-account-create-update-5cwgv" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.783320 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.783575 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="ceilometer-central-agent" containerID="cri-o://e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e" gracePeriod=30 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.783685 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="proxy-httpd" containerID="cri-o://d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337" gracePeriod=30 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.783715 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="sg-core" 
containerID="cri-o://f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754" gracePeriod=30 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.783746 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="ceilometer-notification-agent" containerID="cri-o://9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea" gracePeriod=30 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.800604 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.800804 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="953c17ea-50f0-4111-8bc1-16819c1bce47" containerName="kube-state-metrics" containerID="cri-o://4c604e0c72a77e1a59db1a8f51efca23cb07d1fdc756481cde528fd19e684c10" gracePeriod=30 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.813615 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzdk9\" (UniqueName: \"kubernetes.io/projected/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-kube-api-access-dzdk9\") pod \"root-account-create-update-5cwgv\" (UID: \"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7\") " pod="openstack/root-account-create-update-5cwgv" Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.920445 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-abe6-account-create-update-45grp"] Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.966044 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.966354 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="34dfd085-c2bc-4fa4-a950-7df85c48fec0" containerName="memcached" containerID="cri-o://644d3d4ecb0360b4f4094c99e7ffd6babd80fb95a501022e6a4b47e201e4406b" gracePeriod=30 Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.995262 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-abe6-account-create-update-45grp"] Jan 29 06:58:37 crc kubenswrapper[4861]: I0129 06:58:37.995305 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-abe6-account-create-update-dkvp7"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:37.999489 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.001140 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-abe6-account-create-update-dkvp7"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.003671 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.037199 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-xqbhr"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.051750 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-xqbhr"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.080143 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.096539 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7qxx\" (UniqueName: \"kubernetes.io/projected/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-kube-api-access-k7qxx\") pod \"keystone-abe6-account-create-update-dkvp7\" (UID: \"08bf60bc-3e57-4db1-8d7f-9c82ec4b5310\") " pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.096622 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-operator-scripts\") pod \"keystone-abe6-account-create-update-dkvp7\" (UID: \"08bf60bc-3e57-4db1-8d7f-9c82ec4b5310\") " pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.106571 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5b7fd56548-f8c7z"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.106799 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-5b7fd56548-f8c7z" podUID="27188e95-6192-4569-b254-c1e2d9b28086" containerName="keystone-api" containerID="cri-o://9d22b040aef5f1212a99cca021391b8a04401e06ef3cba31d31cf1356747f059" gracePeriod=30 Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.141096 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-qpnsr"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.153333 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-qpnsr"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.168635 4861 scope.go:117] "RemoveContainer" containerID="6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.169340 4861 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/root-account-create-update-5cwgv" secret="" err="secret \"galera-openstack-dockercfg-vxbxf\" not found" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.169375 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-5cwgv" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.199254 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-operator-scripts\") pod \"keystone-abe6-account-create-update-dkvp7\" (UID: \"08bf60bc-3e57-4db1-8d7f-9c82ec4b5310\") " pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.199382 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1f01-account-create-update-874c5" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.199848 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7qxx\" (UniqueName: \"kubernetes.io/projected/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-kube-api-access-k7qxx\") pod \"keystone-abe6-account-create-update-dkvp7\" (UID: \"08bf60bc-3e57-4db1-8d7f-9c82ec4b5310\") " pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.200215 4861 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.200251 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-operator-scripts podName:08bf60bc-3e57-4db1-8d7f-9c82ec4b5310 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:38.700238857 +0000 UTC m=+1410.371733414 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-operator-scripts") pod "keystone-abe6-account-create-update-dkvp7" (UID: "08bf60bc-3e57-4db1-8d7f-9c82ec4b5310") : configmap "openstack-scripts" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.217358 4861 projected.go:194] Error preparing data for projected volume kube-api-access-k7qxx for pod openstack/keystone-abe6-account-create-update-dkvp7: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.217433 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-kube-api-access-k7qxx podName:08bf60bc-3e57-4db1-8d7f-9c82ec4b5310 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:38.717410747 +0000 UTC m=+1410.388905304 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-k7qxx" (UniqueName: "kubernetes.io/projected/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-kube-api-access-k7qxx") pod "keystone-abe6-account-create-update-dkvp7" (UID: "08bf60bc-3e57-4db1-8d7f-9c82ec4b5310") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.223978 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-kb8t5"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.254138 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-kb8t5"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.271479 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.291415 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-abe6-account-create-update-dkvp7"] Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.298465 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-k7qxx operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/keystone-abe6-account-create-update-dkvp7" podUID="08bf60bc-3e57-4db1-8d7f-9c82ec4b5310" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.301618 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9c95c6e-e80b-4d39-8209-1dbd1c237351-operator-scripts\") pod \"b9c95c6e-e80b-4d39-8209-1dbd1c237351\" (UID: \"b9c95c6e-e80b-4d39-8209-1dbd1c237351\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.301732 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7hgd\" (UniqueName: \"kubernetes.io/projected/b9c95c6e-e80b-4d39-8209-1dbd1c237351-kube-api-access-n7hgd\") pod \"b9c95c6e-e80b-4d39-8209-1dbd1c237351\" (UID: \"b9c95c6e-e80b-4d39-8209-1dbd1c237351\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.302544 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9c95c6e-e80b-4d39-8209-1dbd1c237351-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b9c95c6e-e80b-4d39-8209-1dbd1c237351" (UID: "b9c95c6e-e80b-4d39-8209-1dbd1c237351"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.305396 4861 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.305446 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts podName:50ce05f5-34ea-4c94-ad92-9b458fa5c3c7 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:38.805431768 +0000 UTC m=+1410.476926325 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts") pod "root-account-create-update-5cwgv" (UID: "50ce05f5-34ea-4c94-ad92-9b458fa5c3c7") : configmap "openstack-scripts" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.305791 4861 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.305829 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data podName:5966cedc-8ab5-4390-906b-c5ac39333e09 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:42.305820977 +0000 UTC m=+1413.977315534 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data") pod "rabbitmq-server-0" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09") : configmap "rabbitmq-config-data" not found Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.307880 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9c95c6e-e80b-4d39-8209-1dbd1c237351-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.313168 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-5cwgv"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.331493 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9c95c6e-e80b-4d39-8209-1dbd1c237351-kube-api-access-n7hgd" (OuterVolumeSpecName: "kube-api-access-n7hgd") pod "b9c95c6e-e80b-4d39-8209-1dbd1c237351" (UID: "b9c95c6e-e80b-4d39-8209-1dbd1c237351"). InnerVolumeSpecName "kube-api-access-n7hgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.365998 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-20e6-account-create-update-wrx86"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.371615 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c008-account-create-update-cjb9h" event={"ID":"1f95675a-f692-4c29-90da-01eda11003ac","Type":"ContainerDied","Data":"54b795e324a242b47be3700c2be33ac4d04851fc95a8acea2639174edb259e21"} Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.371657 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54b795e324a242b47be3700c2be33ac4d04851fc95a8acea2639174edb259e21" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.381407 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-20e6-account-create-update-wrx86"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.389786 4861 generic.go:334] "Generic (PLEG): container finished" podID="d72b59e5-64c2-4eab-955e-89d6298e834e" containerID="6756abe8a1b0340d3aa8c881cb242ac677f7ceda5ed061f5774b08d04e550a63" exitCode=0 Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.389852 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d72b59e5-64c2-4eab-955e-89d6298e834e","Type":"ContainerDied","Data":"6756abe8a1b0340d3aa8c881cb242ac677f7ceda5ed061f5774b08d04e550a63"} Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.389877 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"d72b59e5-64c2-4eab-955e-89d6298e834e","Type":"ContainerDied","Data":"84da739768030e27f5faa0b4a707ce4784ad756c7694fd1b62ad8eb84653921e"} Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.389891 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84da739768030e27f5faa0b4a707ce4784ad756c7694fd1b62ad8eb84653921e" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.408578 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zh9b7" event={"ID":"441b7714-dd72-448b-a5a8-b9f56057da43","Type":"ContainerDied","Data":"b1abb4c232dd163ca21fd48e8a560d095fe73abcd9496bafe43db977fec11631"} Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.409025 4861 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1abb4c232dd163ca21fd48e8a560d095fe73abcd9496bafe43db977fec11631" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.410187 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b64fk\" (UniqueName: \"kubernetes.io/projected/f7dfeca3-8fa1-4323-aab9-13f91619ec59-kube-api-access-b64fk\") pod \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.410305 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-catalog-content\") pod \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.410366 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-utilities\") pod \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\" (UID: \"f7dfeca3-8fa1-4323-aab9-13f91619ec59\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.411422 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7hgd\" (UniqueName: \"kubernetes.io/projected/b9c95c6e-e80b-4d39-8209-1dbd1c237351-kube-api-access-n7hgd\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.413652 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": read tcp 10.217.0.2:53014->10.217.0.210:8775: read: connection reset by peer" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.414330 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": read tcp 10.217.0.2:53030->10.217.0.210:8775: read: connection reset by peer" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.416186 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.421894 4861 generic.go:334] "Generic (PLEG): container finished" podID="6c25bacb-4105-4fa4-a798-117f9cbe75fe" containerID="dfde65f746a284f0742bfc416b14134fe608d8f0ca69edc5ab5445ada8954bbe" exitCode=0 Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.421969 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6c25bacb-4105-4fa4-a798-117f9cbe75fe","Type":"ContainerDied","Data":"dfde65f746a284f0742bfc416b14134fe608d8f0ca69edc5ab5445ada8954bbe"} Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.423962 4861 generic.go:334] "Generic (PLEG): container finished" podID="953c17ea-50f0-4111-8bc1-16819c1bce47" containerID="4c604e0c72a77e1a59db1a8f51efca23cb07d1fdc756481cde528fd19e684c10" exitCode=2 Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.424021 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"953c17ea-50f0-4111-8bc1-16819c1bce47","Type":"ContainerDied","Data":"4c604e0c72a77e1a59db1a8f51efca23cb07d1fdc756481cde528fd19e684c10"} Jan 29 06:58:38 crc 
kubenswrapper[4861]: I0129 06:58:38.430753 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-utilities" (OuterVolumeSpecName: "utilities") pod "f7dfeca3-8fa1-4323-aab9-13f91619ec59" (UID: "f7dfeca3-8fa1-4323-aab9-13f91619ec59"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.435126 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.450017 4861 generic.go:334] "Generic (PLEG): container finished" podID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerID="380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9" exitCode=0 Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.450136 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wczkh" event={"ID":"f7dfeca3-8fa1-4323-aab9-13f91619ec59","Type":"ContainerDied","Data":"380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9"} Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.450164 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wczkh" event={"ID":"f7dfeca3-8fa1-4323-aab9-13f91619ec59","Type":"ContainerDied","Data":"666edcbc8b340c7a6b84386d45c282abd9454249b763fdd3ead23f94d9987590"} Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.450302 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wczkh" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.465242 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.472382 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7dfeca3-8fa1-4323-aab9-13f91619ec59-kube-api-access-b64fk" (OuterVolumeSpecName: "kube-api-access-b64fk") pod "f7dfeca3-8fa1-4323-aab9-13f91619ec59" (UID: "f7dfeca3-8fa1-4323-aab9-13f91619ec59"). InnerVolumeSpecName "kube-api-access-b64fk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.487592 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1f01-account-create-update-874c5" event={"ID":"b9c95c6e-e80b-4d39-8209-1dbd1c237351","Type":"ContainerDied","Data":"e010289644a0b068915e18a6424eccc7213da7b01216d521e37e07734771ba04"} Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.487694 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1f01-account-create-update-874c5" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.495707 4861 scope.go:117] "RemoveContainer" containerID="81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.495795 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="8c370e6a-40e9-4055-857e-c8357c904c8e" containerName="galera" containerID="cri-o://9c170b6b24190e8407f34d3e2aa2e80d167bb93c61c16add369b12444c54d78a" gracePeriod=30 Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.502177 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499\": container with ID starting with 81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499 not found: ID does not exist" containerID="81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.502221 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499"} err="failed to get container status \"81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499\": rpc error: code = NotFound desc = could not find container \"81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499\": container with ID starting with 81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499 not found: ID does not exist" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.502242 4861 scope.go:117] "RemoveContainer" containerID="6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd" Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.505488 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd\": container with ID starting with 6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd not found: ID does not exist" containerID="6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.505516 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd"} err="failed to get container status \"6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd\": rpc error: code = NotFound desc = could not find container \"6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd\": container with ID starting with 6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd not found: ID does not exist" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.505534 4861 scope.go:117] "RemoveContainer" containerID="81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.507372 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499"} err="failed to get container status \"81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499\": rpc error: code = NotFound desc = could not find container \"81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499\": 
container with ID starting with 81505c1db8b088c131fdf4cf90c2fbb46eb1629743af8f83c4c0240c9c07a499 not found: ID does not exist" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.507400 4861 scope.go:117] "RemoveContainer" containerID="6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.510003 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd"} err="failed to get container status \"6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd\": rpc error: code = NotFound desc = could not find container \"6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd\": container with ID starting with 6d6a69e07f4510ed6294d2e03cc169de88825bfc03dbbb4ca70ea35ee8779fcd not found: ID does not exist" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.510046 4861 scope.go:117] "RemoveContainer" containerID="9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.512543 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-config-data\") pod \"d72b59e5-64c2-4eab-955e-89d6298e834e\" (UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.512607 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsz82\" (UniqueName: \"kubernetes.io/projected/d72b59e5-64c2-4eab-955e-89d6298e834e-kube-api-access-rsz82\") pod \"d72b59e5-64c2-4eab-955e-89d6298e834e\" (UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.512806 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-combined-ca-bundle\") pod \"d72b59e5-64c2-4eab-955e-89d6298e834e\" (UID: \"d72b59e5-64c2-4eab-955e-89d6298e834e\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.516049 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.516917 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b64fk\" (UniqueName: \"kubernetes.io/projected/f7dfeca3-8fa1-4323-aab9-13f91619ec59-kube-api-access-b64fk\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.533446 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-dfed-account-create-update-t7cnb"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.535002 4861 generic.go:334] "Generic (PLEG): container finished" podID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerID="d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337" exitCode=0 Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.535054 4861 generic.go:334] "Generic (PLEG): container finished" podID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerID="f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754" exitCode=2 Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.535142 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.535606 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"333e76bd-235e-4b74-a6c9-ce702309ec38","Type":"ContainerDied","Data":"d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337"} Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.535634 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"333e76bd-235e-4b74-a6c9-ce702309ec38","Type":"ContainerDied","Data":"f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754"} Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.547404 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d72b59e5-64c2-4eab-955e-89d6298e834e-kube-api-access-rsz82" (OuterVolumeSpecName: "kube-api-access-rsz82") pod "d72b59e5-64c2-4eab-955e-89d6298e834e" (UID: "d72b59e5-64c2-4eab-955e-89d6298e834e"). InnerVolumeSpecName "kube-api-access-rsz82". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.547484 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-dfed-account-create-update-t7cnb"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.548707 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zh9b7" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.553683 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-c008-account-create-update-cjb9h" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.556160 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.572592 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.578954 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-config-data" (OuterVolumeSpecName: "config-data") pod "d72b59e5-64c2-4eab-955e-89d6298e834e" (UID: "d72b59e5-64c2-4eab-955e-89d6298e834e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.583364 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d72b59e5-64c2-4eab-955e-89d6298e834e" (UID: "d72b59e5-64c2-4eab-955e-89d6298e834e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.603559 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-3399-account-create-update-w9zr8" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.603709 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.618266 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7dfeca3-8fa1-4323-aab9-13f91619ec59" (UID: "f7dfeca3-8fa1-4323-aab9-13f91619ec59"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.619674 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8stx\" (UniqueName: \"kubernetes.io/projected/1f95675a-f692-4c29-90da-01eda11003ac-kube-api-access-z8stx\") pod \"1f95675a-f692-4c29-90da-01eda11003ac\" (UID: \"1f95675a-f692-4c29-90da-01eda11003ac\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.619733 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f95675a-f692-4c29-90da-01eda11003ac-operator-scripts\") pod \"1f95675a-f692-4c29-90da-01eda11003ac\" (UID: \"1f95675a-f692-4c29-90da-01eda11003ac\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.619853 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2jsc\" (UniqueName: \"kubernetes.io/projected/441b7714-dd72-448b-a5a8-b9f56057da43-kube-api-access-l2jsc\") pod \"441b7714-dd72-448b-a5a8-b9f56057da43\" (UID: \"441b7714-dd72-448b-a5a8-b9f56057da43\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.620019 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts\") pod \"441b7714-dd72-448b-a5a8-b9f56057da43\" (UID: \"441b7714-dd72-448b-a5a8-b9f56057da43\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.621916 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "441b7714-dd72-448b-a5a8-b9f56057da43" (UID: "441b7714-dd72-448b-a5a8-b9f56057da43"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.623712 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.623983 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7dfeca3-8fa1-4323-aab9-13f91619ec59-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.624001 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/441b7714-dd72-448b-a5a8-b9f56057da43-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.624016 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.624027 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d72b59e5-64c2-4eab-955e-89d6298e834e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.624037 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsz82\" (UniqueName: \"kubernetes.io/projected/d72b59e5-64c2-4eab-955e-89d6298e834e-kube-api-access-rsz82\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.624846 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-733f-account-create-update-v8gq6" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.626560 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f95675a-f692-4c29-90da-01eda11003ac-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1f95675a-f692-4c29-90da-01eda11003ac" (UID: "1f95675a-f692-4c29-90da-01eda11003ac"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.643104 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/441b7714-dd72-448b-a5a8-b9f56057da43-kube-api-access-l2jsc" (OuterVolumeSpecName: "kube-api-access-l2jsc") pod "441b7714-dd72-448b-a5a8-b9f56057da43" (UID: "441b7714-dd72-448b-a5a8-b9f56057da43"). InnerVolumeSpecName "kube-api-access-l2jsc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.644808 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-98fb-account-create-update-jv6v4" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.654630 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f95675a-f692-4c29-90da-01eda11003ac-kube-api-access-z8stx" (OuterVolumeSpecName: "kube-api-access-z8stx") pod "1f95675a-f692-4c29-90da-01eda11003ac" (UID: "1f95675a-f692-4c29-90da-01eda11003ac"). InnerVolumeSpecName "kube-api-access-z8stx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.658256 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9c170b6b24190e8407f34d3e2aa2e80d167bb93c61c16add369b12444c54d78a" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.659877 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9c170b6b24190e8407f34d3e2aa2e80d167bb93c61c16add369b12444c54d78a" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.660523 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.691133 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-58dc7dd48c-t4mkl"] Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.692550 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9c170b6b24190e8407f34d3e2aa2e80d167bb93c61c16add369b12444c54d78a" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.692625 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="8c370e6a-40e9-4055-857e-c8357c904c8e" containerName="galera" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.725060 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66a7721f-92e9-498c-97b8-cbe9890220d9-operator-scripts\") pod \"66a7721f-92e9-498c-97b8-cbe9890220d9\" (UID: \"66a7721f-92e9-498c-97b8-cbe9890220d9\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.725576 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3949b327-31a1-4dfa-bc04-e13b6c033ecd-operator-scripts\") pod \"3949b327-31a1-4dfa-bc04-e13b6c033ecd\" (UID: \"3949b327-31a1-4dfa-bc04-e13b6c033ecd\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.725673 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7d75aa6-1d18-4901-874c-e6b9db142421-operator-scripts\") pod \"b7d75aa6-1d18-4901-874c-e6b9db142421\" (UID: \"b7d75aa6-1d18-4901-874c-e6b9db142421\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.725755 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9fsj\" (UniqueName: \"kubernetes.io/projected/b7d75aa6-1d18-4901-874c-e6b9db142421-kube-api-access-s9fsj\") pod \"b7d75aa6-1d18-4901-874c-e6b9db142421\" (UID: \"b7d75aa6-1d18-4901-874c-e6b9db142421\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.726469 4861 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3949b327-31a1-4dfa-bc04-e13b6c033ecd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3949b327-31a1-4dfa-bc04-e13b6c033ecd" (UID: "3949b327-31a1-4dfa-bc04-e13b6c033ecd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.726822 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66a7721f-92e9-498c-97b8-cbe9890220d9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "66a7721f-92e9-498c-97b8-cbe9890220d9" (UID: "66a7721f-92e9-498c-97b8-cbe9890220d9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.726847 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7d75aa6-1d18-4901-874c-e6b9db142421-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b7d75aa6-1d18-4901-874c-e6b9db142421" (UID: "b7d75aa6-1d18-4901-874c-e6b9db142421"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.727775 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58tc6\" (UniqueName: \"kubernetes.io/projected/66a7721f-92e9-498c-97b8-cbe9890220d9-kube-api-access-58tc6\") pod \"66a7721f-92e9-498c-97b8-cbe9890220d9\" (UID: \"66a7721f-92e9-498c-97b8-cbe9890220d9\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.728451 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qc7r5\" (UniqueName: \"kubernetes.io/projected/3949b327-31a1-4dfa-bc04-e13b6c033ecd-kube-api-access-qc7r5\") pod \"3949b327-31a1-4dfa-bc04-e13b6c033ecd\" (UID: \"3949b327-31a1-4dfa-bc04-e13b6c033ecd\") " Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.730285 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7qxx\" (UniqueName: \"kubernetes.io/projected/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-kube-api-access-k7qxx\") pod \"keystone-abe6-account-create-update-dkvp7\" (UID: \"08bf60bc-3e57-4db1-8d7f-9c82ec4b5310\") " pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.730419 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-operator-scripts\") pod \"keystone-abe6-account-create-update-dkvp7\" (UID: \"08bf60bc-3e57-4db1-8d7f-9c82ec4b5310\") " pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.730573 4861 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.730672 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-operator-scripts podName:08bf60bc-3e57-4db1-8d7f-9c82ec4b5310 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:39.730654976 +0000 UTC m=+1411.402149533 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-operator-scripts") pod "keystone-abe6-account-create-update-dkvp7" (UID: "08bf60bc-3e57-4db1-8d7f-9c82ec4b5310") : configmap "openstack-scripts" not found Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.730728 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66a7721f-92e9-498c-97b8-cbe9890220d9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.730745 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8stx\" (UniqueName: \"kubernetes.io/projected/1f95675a-f692-4c29-90da-01eda11003ac-kube-api-access-z8stx\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.730757 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f95675a-f692-4c29-90da-01eda11003ac-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.730792 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3949b327-31a1-4dfa-bc04-e13b6c033ecd-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.730804 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7d75aa6-1d18-4901-874c-e6b9db142421-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.730814 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2jsc\" (UniqueName: \"kubernetes.io/projected/441b7714-dd72-448b-a5a8-b9f56057da43-kube-api-access-l2jsc\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.732692 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7d75aa6-1d18-4901-874c-e6b9db142421-kube-api-access-s9fsj" (OuterVolumeSpecName: "kube-api-access-s9fsj") pod "b7d75aa6-1d18-4901-874c-e6b9db142421" (UID: "b7d75aa6-1d18-4901-874c-e6b9db142421"). InnerVolumeSpecName "kube-api-access-s9fsj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.735032 4861 projected.go:194] Error preparing data for projected volume kube-api-access-k7qxx for pod openstack/keystone-abe6-account-create-update-dkvp7: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.735091 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-kube-api-access-k7qxx podName:08bf60bc-3e57-4db1-8d7f-9c82ec4b5310 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:39.735079272 +0000 UTC m=+1411.406573829 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-k7qxx" (UniqueName: "kubernetes.io/projected/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-kube-api-access-k7qxx") pod "keystone-abe6-account-create-update-dkvp7" (UID: "08bf60bc-3e57-4db1-8d7f-9c82ec4b5310") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.735388 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66a7721f-92e9-498c-97b8-cbe9890220d9-kube-api-access-58tc6" (OuterVolumeSpecName: "kube-api-access-58tc6") pod "66a7721f-92e9-498c-97b8-cbe9890220d9" (UID: "66a7721f-92e9-498c-97b8-cbe9890220d9"). InnerVolumeSpecName "kube-api-access-58tc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.735460 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-58dc7dd48c-t4mkl"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.736222 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3949b327-31a1-4dfa-bc04-e13b6c033ecd-kube-api-access-qc7r5" (OuterVolumeSpecName: "kube-api-access-qc7r5") pod "3949b327-31a1-4dfa-bc04-e13b6c033ecd" (UID: "3949b327-31a1-4dfa-bc04-e13b6c033ecd"). InnerVolumeSpecName "kube-api-access-qc7r5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.748262 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-0391-account-create-update-jq5xd"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.751903 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-0391-account-create-update-jq5xd"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.789939 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-1f01-account-create-update-874c5"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.811116 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-1f01-account-create-update-874c5"] Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.831850 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qc7r5\" (UniqueName: \"kubernetes.io/projected/3949b327-31a1-4dfa-bc04-e13b6c033ecd-kube-api-access-qc7r5\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.831874 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpmbm\" (UniqueName: \"kubernetes.io/projected/66f8ecd1-e1dd-4663-8681-59fe89a02691-kube-api-access-fpmbm\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.831884 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9fsj\" (UniqueName: \"kubernetes.io/projected/b7d75aa6-1d18-4901-874c-e6b9db142421-kube-api-access-s9fsj\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.831894 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58tc6\" (UniqueName: \"kubernetes.io/projected/66a7721f-92e9-498c-97b8-cbe9890220d9-kube-api-access-58tc6\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.831992 4861 secret.go:188] Couldn't get secret openstack/neutron-config: secret "neutron-config" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.832034 4861 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config podName:ce4279f2-eded-42d5-9353-5235a6b7d64e nodeName:}" failed. No retries permitted until 2026-01-29 06:58:42.832020566 +0000 UTC m=+1414.503515123 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config") pod "neutron-7f868dbfb9-5bdsk" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e") : secret "neutron-config" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.832349 4861 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.832428 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts podName:50ce05f5-34ea-4c94-ad92-9b458fa5c3c7 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:39.832409905 +0000 UTC m=+1411.503904452 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts") pod "root-account-create-update-5cwgv" (UID: "50ce05f5-34ea-4c94-ad92-9b458fa5c3c7") : configmap "openstack-scripts" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.832958 4861 secret.go:188] Couldn't get secret openstack/neutron-httpd-config: secret "neutron-httpd-config" not found Jan 29 06:58:38 crc kubenswrapper[4861]: E0129 06:58:38.833105 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config podName:ce4279f2-eded-42d5-9353-5235a6b7d64e nodeName:}" failed. No retries permitted until 2026-01-29 06:58:42.833015019 +0000 UTC m=+1414.504509576 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "httpd-config" (UniqueName: "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config") pod "neutron-7f868dbfb9-5bdsk" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e") : secret "neutron-httpd-config" not found Jan 29 06:58:38 crc kubenswrapper[4861]: I0129 06:58:38.922984 4861 scope.go:117] "RemoveContainer" containerID="6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.068168 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.079693 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wczkh"] Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.085473 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wczkh"] Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.089419 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.090702 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6957679874-pnq22" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.145400 4861 scope.go:117] "RemoveContainer" containerID="9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.146866 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-public-tls-certs\") pod \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147240 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9znn\" (UniqueName: \"kubernetes.io/projected/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-api-access-n9znn\") pod \"953c17ea-50f0-4111-8bc1-16819c1bce47\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147321 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-scripts\") pod \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147348 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-internal-tls-certs\") pod \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147394 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6c25bacb-4105-4fa4-a798-117f9cbe75fe-etc-machine-id\") pod \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147422 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-combined-ca-bundle\") pod \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147468 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-config-data\") pod \"4b488de3-67a5-49cf-a61a-37a44acbbe19\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147487 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-scripts\") pod \"4b488de3-67a5-49cf-a61a-37a44acbbe19\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147520 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-combined-ca-bundle\") pod \"4b488de3-67a5-49cf-a61a-37a44acbbe19\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147545 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data\") pod \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147564 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-combined-ca-bundle\") pod \"953c17ea-50f0-4111-8bc1-16819c1bce47\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147580 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data-custom\") pod \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147603 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b488de3-67a5-49cf-a61a-37a44acbbe19-logs\") pod \"4b488de3-67a5-49cf-a61a-37a44acbbe19\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147632 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-public-tls-certs\") pod \"4b488de3-67a5-49cf-a61a-37a44acbbe19\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147660 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-internal-tls-certs\") pod \"4b488de3-67a5-49cf-a61a-37a44acbbe19\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147689 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c25bacb-4105-4fa4-a798-117f9cbe75fe-logs\") pod \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147706 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-config\") pod \"953c17ea-50f0-4111-8bc1-16819c1bce47\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147721 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkh4z\" (UniqueName: \"kubernetes.io/projected/6c25bacb-4105-4fa4-a798-117f9cbe75fe-kube-api-access-jkh4z\") pod \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\" (UID: \"6c25bacb-4105-4fa4-a798-117f9cbe75fe\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147741 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-certs\") pod \"953c17ea-50f0-4111-8bc1-16819c1bce47\" (UID: \"953c17ea-50f0-4111-8bc1-16819c1bce47\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147758 4861 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-tmz2z\" (UniqueName: \"kubernetes.io/projected/4b488de3-67a5-49cf-a61a-37a44acbbe19-kube-api-access-tmz2z\") pod \"4b488de3-67a5-49cf-a61a-37a44acbbe19\" (UID: \"4b488de3-67a5-49cf-a61a-37a44acbbe19\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.147778 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6c25bacb-4105-4fa4-a798-117f9cbe75fe-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "6c25bacb-4105-4fa4-a798-117f9cbe75fe" (UID: "6c25bacb-4105-4fa4-a798-117f9cbe75fe"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.148191 4861 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6c25bacb-4105-4fa4-a798-117f9cbe75fe-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: E0129 06:58:39.150509 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3\": container with ID starting with 9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3 not found: ID does not exist" containerID="9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.150555 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3"} err="failed to get container status \"9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3\": rpc error: code = NotFound desc = could not find container \"9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3\": container with ID starting with 9283ae3df86e6d114127f4db8afe491fafe7c1e7ab9d13dee4010dc629bb93b3 not found: ID does not exist" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.150579 4861 scope.go:117] "RemoveContainer" containerID="6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96" Jan 29 06:58:39 crc kubenswrapper[4861]: E0129 06:58:39.151212 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96\": container with ID starting with 6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96 not found: ID does not exist" containerID="6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.151236 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96"} err="failed to get container status \"6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96\": rpc error: code = NotFound desc = could not find container \"6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96\": container with ID starting with 6f6b147a68b87166673439ce069bebd0664bb845eb5e21b010e4edf985700b96 not found: ID does not exist" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.151248 4861 scope.go:117] "RemoveContainer" containerID="004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.152310 4861 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c25bacb-4105-4fa4-a798-117f9cbe75fe-logs" (OuterVolumeSpecName: "logs") pod "6c25bacb-4105-4fa4-a798-117f9cbe75fe" (UID: "6c25bacb-4105-4fa4-a798-117f9cbe75fe"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.153442 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b488de3-67a5-49cf-a61a-37a44acbbe19-logs" (OuterVolumeSpecName: "logs") pod "4b488de3-67a5-49cf-a61a-37a44acbbe19" (UID: "4b488de3-67a5-49cf-a61a-37a44acbbe19"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.154644 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-api-access-n9znn" (OuterVolumeSpecName: "kube-api-access-n9znn") pod "953c17ea-50f0-4111-8bc1-16819c1bce47" (UID: "953c17ea-50f0-4111-8bc1-16819c1bce47"). InnerVolumeSpecName "kube-api-access-n9znn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.156599 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6c25bacb-4105-4fa4-a798-117f9cbe75fe" (UID: "6c25bacb-4105-4fa4-a798-117f9cbe75fe"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.159367 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-scripts" (OuterVolumeSpecName: "scripts") pod "6c25bacb-4105-4fa4-a798-117f9cbe75fe" (UID: "6c25bacb-4105-4fa4-a798-117f9cbe75fe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.159464 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b488de3-67a5-49cf-a61a-37a44acbbe19-kube-api-access-tmz2z" (OuterVolumeSpecName: "kube-api-access-tmz2z") pod "4b488de3-67a5-49cf-a61a-37a44acbbe19" (UID: "4b488de3-67a5-49cf-a61a-37a44acbbe19"). InnerVolumeSpecName "kube-api-access-tmz2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.160829 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-scripts" (OuterVolumeSpecName: "scripts") pod "4b488de3-67a5-49cf-a61a-37a44acbbe19" (UID: "4b488de3-67a5-49cf-a61a-37a44acbbe19"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.169378 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1aa0d0b6-7731-421f-ac34-43cfd70e808c" path="/var/lib/kubelet/pods/1aa0d0b6-7731-421f-ac34-43cfd70e808c/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.171045 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46d0aaff-25ca-4605-9b55-ee1f5a897ff2" path="/var/lib/kubelet/pods/46d0aaff-25ca-4605-9b55-ee1f5a897ff2/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.172008 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bc33551-a784-45a5-8184-ada61e659999" path="/var/lib/kubelet/pods/4bc33551-a784-45a5-8184-ada61e659999/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.172530 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66f8ecd1-e1dd-4663-8681-59fe89a02691" path="/var/lib/kubelet/pods/66f8ecd1-e1dd-4663-8681-59fe89a02691/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.173333 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70ddea03-eaa7-41ff-8bfe-b050ef7848b5" path="/var/lib/kubelet/pods/70ddea03-eaa7-41ff-8bfe-b050ef7848b5/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.174697 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a38ce6f-f3ff-4976-8acb-9576d89df924" path="/var/lib/kubelet/pods/9a38ce6f-f3ff-4976-8acb-9576d89df924/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.175854 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0032cf8-4c45-4a4c-927d-686adba85ab1" path="/var/lib/kubelet/pods/a0032cf8-4c45-4a4c-927d-686adba85ab1/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.183141 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a39b615c-006f-43b4-9f38-0fe1d0814696" path="/var/lib/kubelet/pods/a39b615c-006f-43b4-9f38-0fe1d0814696/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.183596 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9c95c6e-e80b-4d39-8209-1dbd1c237351" path="/var/lib/kubelet/pods/b9c95c6e-e80b-4d39-8209-1dbd1c237351/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.183924 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4c159fd-7714-4351-8258-437e67ff5dbc" path="/var/lib/kubelet/pods/c4c159fd-7714-4351-8258-437e67ff5dbc/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.184823 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d48d2111-d309-4775-b728-cdd8b7163ebc" path="/var/lib/kubelet/pods/d48d2111-d309-4775-b728-cdd8b7163ebc/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.185537 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8180980-2786-4514-a34e-4d68009ea724" path="/var/lib/kubelet/pods/d8180980-2786-4514-a34e-4d68009ea724/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.186006 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" path="/var/lib/kubelet/pods/f7dfeca3-8fa1-4323-aab9-13f91619ec59/volumes" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.188510 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c25bacb-4105-4fa4-a798-117f9cbe75fe-kube-api-access-jkh4z" (OuterVolumeSpecName: 
"kube-api-access-jkh4z") pod "6c25bacb-4105-4fa4-a798-117f9cbe75fe" (UID: "6c25bacb-4105-4fa4-a798-117f9cbe75fe"). InnerVolumeSpecName "kube-api-access-jkh4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.195343 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-7f868dbfb9-5bdsk" podUID="ce4279f2-eded-42d5-9353-5235a6b7d64e" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.169:9696/\": dial tcp 10.217.0.169:9696: connect: connection refused" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.249743 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c25bacb-4105-4fa4-a798-117f9cbe75fe-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.251064 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkh4z\" (UniqueName: \"kubernetes.io/projected/6c25bacb-4105-4fa4-a798-117f9cbe75fe-kube-api-access-jkh4z\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.251116 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmz2z\" (UniqueName: \"kubernetes.io/projected/4b488de3-67a5-49cf-a61a-37a44acbbe19-kube-api-access-tmz2z\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.251132 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9znn\" (UniqueName: \"kubernetes.io/projected/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-api-access-n9znn\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.251142 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.251161 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.251170 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.251179 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b488de3-67a5-49cf-a61a-37a44acbbe19-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.256550 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.284465 4861 scope.go:117] "RemoveContainer" containerID="004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95" Jan 29 06:58:39 crc kubenswrapper[4861]: E0129 06:58:39.298240 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95\": container with ID starting with 004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95 not found: ID does not exist" containerID="004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.298289 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95"} err="failed to get container status \"004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95\": rpc error: code = NotFound desc = could not find container \"004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95\": container with ID starting with 004f8723fe4ab1d287ca5c1f3c284c414e5da6ded7ca4ae0fe69d0bd16343e95 not found: ID does not exist" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.298320 4861 scope.go:117] "RemoveContainer" containerID="380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.300823 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "953c17ea-50f0-4111-8bc1-16819c1bce47" (UID: "953c17ea-50f0-4111-8bc1-16819c1bce47"). InnerVolumeSpecName "kube-state-metrics-tls-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.355174 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-nova-metadata-tls-certs\") pod \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.355295 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-config-data\") pod \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.355328 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctgbt\" (UniqueName: \"kubernetes.io/projected/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-kube-api-access-ctgbt\") pod \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.355360 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-combined-ca-bundle\") pod \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.355538 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-logs\") pod \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\" (UID: \"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.356298 4861 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.386645 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-logs" (OuterVolumeSpecName: "logs") pod "e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" (UID: "e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.462904 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.484155 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-kube-api-access-ctgbt" (OuterVolumeSpecName: "kube-api-access-ctgbt") pod "e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" (UID: "e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed"). InnerVolumeSpecName "kube-api-access-ctgbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.533329 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.548273 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c25bacb-4105-4fa4-a798-117f9cbe75fe" (UID: "6c25bacb-4105-4fa4-a798-117f9cbe75fe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.566979 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nphx5\" (UniqueName: \"kubernetes.io/projected/5ce76094-c71f-46c7-a69d-7d30d8540c5a-kube-api-access-nphx5\") pod \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.567102 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-scripts\") pod \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.567137 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-config-data\") pod \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.567195 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-combined-ca-bundle\") pod \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.567263 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-httpd-run\") pod \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.567286 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-logs\") pod \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.567301 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-internal-tls-certs\") pod \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.567596 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\" (UID: \"5ce76094-c71f-46c7-a69d-7d30d8540c5a\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.568083 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctgbt\" (UniqueName: \"kubernetes.io/projected/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-kube-api-access-ctgbt\") on node \"crc\" 
DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.568101 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.568403 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5ce76094-c71f-46c7-a69d-7d30d8540c5a" (UID: "5ce76094-c71f-46c7-a69d-7d30d8540c5a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.568513 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-logs" (OuterVolumeSpecName: "logs") pod "5ce76094-c71f-46c7-a69d-7d30d8540c5a" (UID: "5ce76094-c71f-46c7-a69d-7d30d8540c5a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.572374 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6c25bacb-4105-4fa4-a798-117f9cbe75fe","Type":"ContainerDied","Data":"5c5c830c9d89a378027db88210b7e21af05dd3224886f16d3185b764989e2543"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.572466 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.573919 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.576812 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.577729 4861 generic.go:334] "Generic (PLEG): container finished" podID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerID="114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194" exitCode=0 Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.577786 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.577818 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed","Type":"ContainerDied","Data":"114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.578001 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed","Type":"ContainerDied","Data":"78f1c9bf2f659a883e25f8dbd870de96e32a2713d76aceed4dc8cc3c9e561f18"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.580195 4861 generic.go:334] "Generic (PLEG): container finished" podID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerID="d3f897c89c2801586a375c91d0d6297c2d965784611ff0abf1834bdaf78b6197" exitCode=0 Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.580263 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e78be7f2-60d4-4f0e-a510-bf5e652110d1","Type":"ContainerDied","Data":"d3f897c89c2801586a375c91d0d6297c2d965784611ff0abf1834bdaf78b6197"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.582835 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-733f-account-create-update-v8gq6" event={"ID":"b7d75aa6-1d18-4901-874c-e6b9db142421","Type":"ContainerDied","Data":"53a6286b6d02178a4f0d214f3a7fcc544ec3d450f22f31162083f4d0efba00bc"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.582842 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-733f-account-create-update-v8gq6" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.586765 4861 generic.go:334] "Generic (PLEG): container finished" podID="947c222c-8f0c-423f-84e8-75a4b9322829" containerID="a95782607794ffb075c46075902d651f0f6db3732fc62ef57c3c4e66ef00c4f4" exitCode=0 Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.586849 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d8b667488-v7lmh" event={"ID":"947c222c-8f0c-423f-84e8-75a4b9322829","Type":"ContainerDied","Data":"a95782607794ffb075c46075902d651f0f6db3732fc62ef57c3c4e66ef00c4f4"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.586911 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-d8b667488-v7lmh" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.588649 4861 generic.go:334] "Generic (PLEG): container finished" podID="09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" containerID="12d75e57461b87b2ec9d6d00c1b304c2545872ff7dcee032f19e27ff512c2516" exitCode=0 Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.588681 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6b8d96575c-7zzfv" event={"ID":"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3","Type":"ContainerDied","Data":"12d75e57461b87b2ec9d6d00c1b304c2545872ff7dcee032f19e27ff512c2516"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.588766 4861 scope.go:117] "RemoveContainer" containerID="f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.627551 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3399-account-create-update-w9zr8" event={"ID":"3949b327-31a1-4dfa-bc04-e13b6c033ecd","Type":"ContainerDied","Data":"1f6ac1e97f55e920c21c072eeadbbb6450b90bfe5bd5506f6a15752b0332258b"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.627630 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3399-account-create-update-w9zr8" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.628327 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-scripts" (OuterVolumeSpecName: "scripts") pod "5ce76094-c71f-46c7-a69d-7d30d8540c5a" (UID: "5ce76094-c71f-46c7-a69d-7d30d8540c5a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.628962 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ce76094-c71f-46c7-a69d-7d30d8540c5a-kube-api-access-nphx5" (OuterVolumeSpecName: "kube-api-access-nphx5") pod "5ce76094-c71f-46c7-a69d-7d30d8540c5a" (UID: "5ce76094-c71f-46c7-a69d-7d30d8540c5a"). InnerVolumeSpecName "kube-api-access-nphx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.629690 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "953c17ea-50f0-4111-8bc1-16819c1bce47" (UID: "953c17ea-50f0-4111-8bc1-16819c1bce47"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.632453 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"953c17ea-50f0-4111-8bc1-16819c1bce47","Type":"ContainerDied","Data":"1b991737399547fcfaec149607e840c70465d2417b8908a8836f74faabdfdabd"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.632620 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.635193 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "5ce76094-c71f-46c7-a69d-7d30d8540c5a" (UID: "5ce76094-c71f-46c7-a69d-7d30d8540c5a"). InnerVolumeSpecName "local-storage08-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.635271 4861 generic.go:334] "Generic (PLEG): container finished" podID="34dfd085-c2bc-4fa4-a950-7df85c48fec0" containerID="644d3d4ecb0360b4f4094c99e7ffd6babd80fb95a501022e6a4b47e201e4406b" exitCode=0 Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.635357 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.635408 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"34dfd085-c2bc-4fa4-a950-7df85c48fec0","Type":"ContainerDied","Data":"644d3d4ecb0360b4f4094c99e7ffd6babd80fb95a501022e6a4b47e201e4406b"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.635904 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-config-data" (OuterVolumeSpecName: "config-data") pod "4b488de3-67a5-49cf-a61a-37a44acbbe19" (UID: "4b488de3-67a5-49cf-a61a-37a44acbbe19"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.637601 4861 generic.go:334] "Generic (PLEG): container finished" podID="bd9ed061-0329-42e0-8cca-e7b560c7a19c" containerID="c312a62932c0664c9e8bde4d291603923f2e2f621322670c3abe137b105616db" exitCode=0 Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.637727 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" event={"ID":"bd9ed061-0329-42e0-8cca-e7b560c7a19c","Type":"ContainerDied","Data":"c312a62932c0664c9e8bde4d291603923f2e2f621322670c3abe137b105616db"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.642589 4861 generic.go:334] "Generic (PLEG): container finished" podID="4b488de3-67a5-49cf-a61a-37a44acbbe19" containerID="64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8" exitCode=0 Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.642667 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6957679874-pnq22" event={"ID":"4b488de3-67a5-49cf-a61a-37a44acbbe19","Type":"ContainerDied","Data":"64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.642718 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6957679874-pnq22" event={"ID":"4b488de3-67a5-49cf-a61a-37a44acbbe19","Type":"ContainerDied","Data":"2ff6119bc7bdded212166337823d1530f1ce03fc46b0826b2b3372aa77b0c84f"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.642808 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6957679874-pnq22" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.663442 4861 generic.go:334] "Generic (PLEG): container finished" podID="5ce76094-c71f-46c7-a69d-7d30d8540c5a" containerID="c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235" exitCode=0 Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.663709 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5ce76094-c71f-46c7-a69d-7d30d8540c5a","Type":"ContainerDied","Data":"c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.663786 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5ce76094-c71f-46c7-a69d-7d30d8540c5a","Type":"ContainerDied","Data":"24643f26c66512e243106d2edf41b11b2f702622b324f89a9e50b3c2f610202b"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.663547 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.668755 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-memcached-tls-certs\") pod \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.668796 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4z6g\" (UniqueName: \"kubernetes.io/projected/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kube-api-access-d4z6g\") pod \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.668850 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zxj2\" (UniqueName: \"kubernetes.io/projected/947c222c-8f0c-423f-84e8-75a4b9322829-kube-api-access-4zxj2\") pod \"947c222c-8f0c-423f-84e8-75a4b9322829\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.668871 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/947c222c-8f0c-423f-84e8-75a4b9322829-logs\") pod \"947c222c-8f0c-423f-84e8-75a4b9322829\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.668945 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data-custom\") pod \"947c222c-8f0c-423f-84e8-75a4b9322829\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.668964 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-combined-ca-bundle\") pod \"947c222c-8f0c-423f-84e8-75a4b9322829\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.669056 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kolla-config\") pod 
\"34dfd085-c2bc-4fa4-a950-7df85c48fec0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.669098 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-combined-ca-bundle\") pod \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.669148 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-public-tls-certs\") pod \"947c222c-8f0c-423f-84e8-75a4b9322829\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.669222 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data\") pod \"947c222c-8f0c-423f-84e8-75a4b9322829\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.669238 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-config-data\") pod \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\" (UID: \"34dfd085-c2bc-4fa4-a950-7df85c48fec0\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.669275 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-internal-tls-certs\") pod \"947c222c-8f0c-423f-84e8-75a4b9322829\" (UID: \"947c222c-8f0c-423f-84e8-75a4b9322829\") " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.669976 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/947c222c-8f0c-423f-84e8-75a4b9322829-logs" (OuterVolumeSpecName: "logs") pod "947c222c-8f0c-423f-84e8-75a4b9322829" (UID: "947c222c-8f0c-423f-84e8-75a4b9322829"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.670048 4861 generic.go:334] "Generic (PLEG): container finished" podID="10b22efc-707a-4ffc-8edc-44c39900ba2b" containerID="9dd5b64d9ec144641a738b5aa4db658de3394d4fbf3ece8178a1881822e737bf" exitCode=0 Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.670140 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10b22efc-707a-4ffc-8edc-44c39900ba2b","Type":"ContainerDied","Data":"9dd5b64d9ec144641a738b5aa4db658de3394d4fbf3ece8178a1881822e737bf"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.671758 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-config-data" (OuterVolumeSpecName: "config-data") pod "34dfd085-c2bc-4fa4-a950-7df85c48fec0" (UID: "34dfd085-c2bc-4fa4-a950-7df85c48fec0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.671830 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "34dfd085-c2bc-4fa4-a950-7df85c48fec0" (UID: "34dfd085-c2bc-4fa4-a950-7df85c48fec0"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.672209 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.672225 4861 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.672237 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.672248 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nphx5\" (UniqueName: \"kubernetes.io/projected/5ce76094-c71f-46c7-a69d-7d30d8540c5a-kube-api-access-nphx5\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.672258 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/34dfd085-c2bc-4fa4-a950-7df85c48fec0-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.672267 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.672275 4861 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.672286 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.672295 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/947c222c-8f0c-423f-84e8-75a4b9322829-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.672302 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ce76094-c71f-46c7-a69d-7d30d8540c5a-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.684978 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-733f-account-create-update-v8gq6"] Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.686938 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-98fb-account-create-update-jv6v4" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.686946 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-98fb-account-create-update-jv6v4" event={"ID":"66a7721f-92e9-498c-97b8-cbe9890220d9","Type":"ContainerDied","Data":"ef908f445913aafc1c18fb5d053f6266894656d2d7f42a5a7bf2bee15946578c"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.687700 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-733f-account-create-update-v8gq6"] Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.694570 4861 generic.go:334] "Generic (PLEG): container finished" podID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerID="e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e" exitCode=0 Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.694647 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.694676 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.695168 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zh9b7" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.695239 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"333e76bd-235e-4b74-a6c9-ce702309ec38","Type":"ContainerDied","Data":"e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e"} Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.695704 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-c008-account-create-update-cjb9h" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.696306 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "947c222c-8f0c-423f-84e8-75a4b9322829" (UID: "947c222c-8f0c-423f-84e8-75a4b9322829"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.697858 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/947c222c-8f0c-423f-84e8-75a4b9322829-kube-api-access-4zxj2" (OuterVolumeSpecName: "kube-api-access-4zxj2") pod "947c222c-8f0c-423f-84e8-75a4b9322829" (UID: "947c222c-8f0c-423f-84e8-75a4b9322829"). InnerVolumeSpecName "kube-api-access-4zxj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.698556 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-3399-account-create-update-w9zr8"] Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.703542 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-3399-account-create-update-w9zr8"] Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.707580 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kube-api-access-d4z6g" (OuterVolumeSpecName: "kube-api-access-d4z6g") pod "34dfd085-c2bc-4fa4-a950-7df85c48fec0" (UID: "34dfd085-c2bc-4fa4-a950-7df85c48fec0"). 
InnerVolumeSpecName "kube-api-access-d4z6g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.723689 4861 scope.go:117] "RemoveContainer" containerID="a8d524158947f924eab40d9bdb95ed7ad3624f2945d85b75f870de2e69a72ece" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.723819 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6c25bacb-4105-4fa4-a798-117f9cbe75fe" (UID: "6c25bacb-4105-4fa4-a798-117f9cbe75fe"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.777244 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7qxx\" (UniqueName: \"kubernetes.io/projected/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-kube-api-access-k7qxx\") pod \"keystone-abe6-account-create-update-dkvp7\" (UID: \"08bf60bc-3e57-4db1-8d7f-9c82ec4b5310\") " pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.777340 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-operator-scripts\") pod \"keystone-abe6-account-create-update-dkvp7\" (UID: \"08bf60bc-3e57-4db1-8d7f-9c82ec4b5310\") " pod="openstack/keystone-abe6-account-create-update-dkvp7" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.777445 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.777461 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.777480 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4z6g\" (UniqueName: \"kubernetes.io/projected/34dfd085-c2bc-4fa4-a950-7df85c48fec0-kube-api-access-d4z6g\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.777494 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zxj2\" (UniqueName: \"kubernetes.io/projected/947c222c-8f0c-423f-84e8-75a4b9322829-kube-api-access-4zxj2\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: E0129 06:58:39.777587 4861 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 06:58:39 crc kubenswrapper[4861]: E0129 06:58:39.777639 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-operator-scripts podName:08bf60bc-3e57-4db1-8d7f-9c82ec4b5310 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:41.777622584 +0000 UTC m=+1413.449117151 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-operator-scripts") pod "keystone-abe6-account-create-update-dkvp7" (UID: "08bf60bc-3e57-4db1-8d7f-9c82ec4b5310") : configmap "openstack-scripts" not found Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.778044 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "947c222c-8f0c-423f-84e8-75a4b9322829" (UID: "947c222c-8f0c-423f-84e8-75a4b9322829"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: E0129 06:58:39.788632 4861 projected.go:194] Error preparing data for projected volume kube-api-access-k7qxx for pod openstack/keystone-abe6-account-create-update-dkvp7: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 06:58:39 crc kubenswrapper[4861]: E0129 06:58:39.788697 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-kube-api-access-k7qxx podName:08bf60bc-3e57-4db1-8d7f-9c82ec4b5310 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:41.788679608 +0000 UTC m=+1413.460174175 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-k7qxx" (UniqueName: "kubernetes.io/projected/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-kube-api-access-k7qxx") pod "keystone-abe6-account-create-update-dkvp7" (UID: "08bf60bc-3e57-4db1-8d7f-9c82ec4b5310") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.841636 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ce76094-c71f-46c7-a69d-7d30d8540c5a" (UID: "5ce76094-c71f-46c7-a69d-7d30d8540c5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.853405 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b488de3-67a5-49cf-a61a-37a44acbbe19" (UID: "4b488de3-67a5-49cf-a61a-37a44acbbe19"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.866412 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34dfd085-c2bc-4fa4-a950-7df85c48fec0" (UID: "34dfd085-c2bc-4fa4-a950-7df85c48fec0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.871390 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "953c17ea-50f0-4111-8bc1-16819c1bce47" (UID: "953c17ea-50f0-4111-8bc1-16819c1bce47"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.879304 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.879331 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.879340 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.879350 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953c17ea-50f0-4111-8bc1-16819c1bce47-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.879358 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: E0129 06:58:39.879352 4861 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 06:58:39 crc kubenswrapper[4861]: E0129 06:58:39.879423 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts podName:50ce05f5-34ea-4c94-ad92-9b458fa5c3c7 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:41.879405563 +0000 UTC m=+1413.550900130 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts") pod "root-account-create-update-5cwgv" (UID: "50ce05f5-34ea-4c94-ad92-9b458fa5c3c7") : configmap "openstack-scripts" not found Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.887202 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.899344 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" (UID: "e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.942202 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-5cwgv"] Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.949747 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data" (OuterVolumeSpecName: "config-data") pod "6c25bacb-4105-4fa4-a798-117f9cbe75fe" (UID: "6c25bacb-4105-4fa4-a798-117f9cbe75fe"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.954639 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4b488de3-67a5-49cf-a61a-37a44acbbe19" (UID: "4b488de3-67a5-49cf-a61a-37a44acbbe19"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.959268 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4b488de3-67a5-49cf-a61a-37a44acbbe19" (UID: "4b488de3-67a5-49cf-a61a-37a44acbbe19"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.965557 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data" (OuterVolumeSpecName: "config-data") pod "947c222c-8f0c-423f-84e8-75a4b9322829" (UID: "947c222c-8f0c-423f-84e8-75a4b9322829"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.971472 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "947c222c-8f0c-423f-84e8-75a4b9322829" (UID: "947c222c-8f0c-423f-84e8-75a4b9322829"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.988848 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.988876 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.988886 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.988895 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.988906 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.988952 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.988963 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/4b488de3-67a5-49cf-a61a-37a44acbbe19-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:39 crc kubenswrapper[4861]: I0129 06:58:39.991858 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-config-data" (OuterVolumeSpecName: "config-data") pod "5ce76094-c71f-46c7-a69d-7d30d8540c5a" (UID: "5ce76094-c71f-46c7-a69d-7d30d8540c5a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.010345 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "34dfd085-c2bc-4fa4-a950-7df85c48fec0" (UID: "34dfd085-c2bc-4fa4-a950-7df85c48fec0"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: E0129 06:58:40.010498 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 06:58:40 crc kubenswrapper[4861]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 06:58:40 crc kubenswrapper[4861]: Jan 29 06:58:40 crc kubenswrapper[4861]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 06:58:40 crc kubenswrapper[4861]: Jan 29 06:58:40 crc kubenswrapper[4861]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 06:58:40 crc kubenswrapper[4861]: Jan 29 06:58:40 crc kubenswrapper[4861]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 06:58:40 crc kubenswrapper[4861]: Jan 29 06:58:40 crc kubenswrapper[4861]: if [ -n "" ]; then Jan 29 06:58:40 crc kubenswrapper[4861]: GRANT_DATABASE="" Jan 29 06:58:40 crc kubenswrapper[4861]: else Jan 29 06:58:40 crc kubenswrapper[4861]: GRANT_DATABASE="*" Jan 29 06:58:40 crc kubenswrapper[4861]: fi Jan 29 06:58:40 crc kubenswrapper[4861]: Jan 29 06:58:40 crc kubenswrapper[4861]: # going for maximum compatibility here: Jan 29 06:58:40 crc kubenswrapper[4861]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 06:58:40 crc kubenswrapper[4861]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 06:58:40 crc kubenswrapper[4861]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 06:58:40 crc kubenswrapper[4861]: # support updates Jan 29 06:58:40 crc kubenswrapper[4861]: Jan 29 06:58:40 crc kubenswrapper[4861]: $MYSQL_CMD < logger="UnhandledError" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.017337 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-config-data" (OuterVolumeSpecName: "config-data") pod "e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" (UID: "e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: E0129 06:58:40.017397 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-5cwgv" podUID="50ce05f5-34ea-4c94-ad92-9b458fa5c3c7" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.030445 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5ce76094-c71f-46c7-a69d-7d30d8540c5a" (UID: "5ce76094-c71f-46c7-a69d-7d30d8540c5a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.037208 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" (UID: "e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.048261 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "947c222c-8f0c-423f-84e8-75a4b9322829" (UID: "947c222c-8f0c-423f-84e8-75a4b9322829"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.069184 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6c25bacb-4105-4fa4-a798-117f9cbe75fe" (UID: "6c25bacb-4105-4fa4-a798-117f9cbe75fe"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.090551 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.090578 4861 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/34dfd085-c2bc-4fa4-a950-7df85c48fec0-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.090587 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ce76094-c71f-46c7-a69d-7d30d8540c5a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.090596 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c25bacb-4105-4fa4-a798-117f9cbe75fe-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.090604 4861 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.090613 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.090621 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/947c222c-8f0c-423f-84e8-75a4b9322829-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.097286 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.107729 4861 scope.go:117] "RemoveContainer" containerID="380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.108678 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 06:58:40 crc kubenswrapper[4861]: E0129 06:58:40.108739 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9\": container with ID starting with 380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9 not found: ID does not exist" containerID="380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.108761 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9"} err="failed to get container status \"380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9\": rpc error: code = NotFound desc = could not find container \"380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9\": container with ID starting with 380eff0886d1b07ac438567257a0082d383a3dd522f0c138a0f6173b15364ee9 not found: ID does not exist" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.108779 4861 scope.go:117] "RemoveContainer" containerID="f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591" Jan 29 06:58:40 crc kubenswrapper[4861]: E0129 06:58:40.109126 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591\": container with ID starting with f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591 not found: ID does not exist" containerID="f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.109148 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591"} err="failed to get container status \"f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591\": rpc error: code = NotFound desc = could not find container \"f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591\": container with ID starting with f95a13ef72022700fcd0593bff15064778615e4ad57cf6f42a6e28830eeb5591 not found: ID does not exist" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.109163 4861 scope.go:117] "RemoveContainer" containerID="a8d524158947f924eab40d9bdb95ed7ad3624f2945d85b75f870de2e69a72ece" Jan 29 06:58:40 crc kubenswrapper[4861]: E0129 06:58:40.109489 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8d524158947f924eab40d9bdb95ed7ad3624f2945d85b75f870de2e69a72ece\": container with ID starting with a8d524158947f924eab40d9bdb95ed7ad3624f2945d85b75f870de2e69a72ece not found: ID does not exist" containerID="a8d524158947f924eab40d9bdb95ed7ad3624f2945d85b75f870de2e69a72ece" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.109563 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8d524158947f924eab40d9bdb95ed7ad3624f2945d85b75f870de2e69a72ece"} err="failed to get container status \"a8d524158947f924eab40d9bdb95ed7ad3624f2945d85b75f870de2e69a72ece\": rpc error: code = NotFound desc = could not find container \"a8d524158947f924eab40d9bdb95ed7ad3624f2945d85b75f870de2e69a72ece\": container with ID starting with 
a8d524158947f924eab40d9bdb95ed7ad3624f2945d85b75f870de2e69a72ece not found: ID does not exist" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.109595 4861 scope.go:117] "RemoveContainer" containerID="dfde65f746a284f0742bfc416b14134fe608d8f0ca69edc5ab5445ada8954bbe" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.124943 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.143187 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.151905 4861 scope.go:117] "RemoveContainer" containerID="1040f23ce9435abf9d98fe86eda2bd1c172d7b64d769b973274d71099bd7ad84" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.163689 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zh9b7"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.174117 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-zh9b7"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.174232 4861 scope.go:117] "RemoveContainer" containerID="114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.185206 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.190390 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.209299 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-logs" (OuterVolumeSpecName: "logs") pod "10b22efc-707a-4ffc-8edc-44c39900ba2b" (UID: "10b22efc-707a-4ffc-8edc-44c39900ba2b"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.209581 4861 scope.go:117] "RemoveContainer" containerID="d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215404 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-logs\") pod \"10b22efc-707a-4ffc-8edc-44c39900ba2b\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215467 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-scripts\") pod \"10b22efc-707a-4ffc-8edc-44c39900ba2b\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215535 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gclbw\" (UniqueName: \"kubernetes.io/projected/bd9ed061-0329-42e0-8cca-e7b560c7a19c-kube-api-access-gclbw\") pod \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215589 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data\") pod \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215620 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-config-data\") pod \"10b22efc-707a-4ffc-8edc-44c39900ba2b\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215648 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxljp\" (UniqueName: \"kubernetes.io/projected/e78be7f2-60d4-4f0e-a510-bf5e652110d1-kube-api-access-fxljp\") pod \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215696 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-logs\") pod \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215728 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-combined-ca-bundle\") pod \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215755 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e78be7f2-60d4-4f0e-a510-bf5e652110d1-logs\") pod \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215809 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data\") pod \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215843 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data-custom\") pod \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215878 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data-custom\") pod \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215919 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-internal-tls-certs\") pod \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215951 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"10b22efc-707a-4ffc-8edc-44c39900ba2b\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.215980 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-combined-ca-bundle\") pod \"10b22efc-707a-4ffc-8edc-44c39900ba2b\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.216008 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-config-data\") pod \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.216042 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bvfs\" (UniqueName: \"kubernetes.io/projected/10b22efc-707a-4ffc-8edc-44c39900ba2b-kube-api-access-2bvfs\") pod \"10b22efc-707a-4ffc-8edc-44c39900ba2b\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.216221 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9ed061-0329-42e0-8cca-e7b560c7a19c-logs\") pod \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.216294 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhn6r\" (UniqueName: \"kubernetes.io/projected/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-kube-api-access-dhn6r\") pod \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.216361 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-combined-ca-bundle\") pod \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\" (UID: \"bd9ed061-0329-42e0-8cca-e7b560c7a19c\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.216431 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-public-tls-certs\") pod \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\" (UID: \"e78be7f2-60d4-4f0e-a510-bf5e652110d1\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.216463 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-combined-ca-bundle\") pod \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\" (UID: \"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.216541 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-httpd-run\") pod \"10b22efc-707a-4ffc-8edc-44c39900ba2b\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.216600 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-public-tls-certs\") pod \"10b22efc-707a-4ffc-8edc-44c39900ba2b\" (UID: \"10b22efc-707a-4ffc-8edc-44c39900ba2b\") " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.218460 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.220525 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "10b22efc-707a-4ffc-8edc-44c39900ba2b" (UID: "10b22efc-707a-4ffc-8edc-44c39900ba2b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.220527 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd9ed061-0329-42e0-8cca-e7b560c7a19c-logs" (OuterVolumeSpecName: "logs") pod "bd9ed061-0329-42e0-8cca-e7b560c7a19c" (UID: "bd9ed061-0329-42e0-8cca-e7b560c7a19c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.223762 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "10b22efc-707a-4ffc-8edc-44c39900ba2b" (UID: "10b22efc-707a-4ffc-8edc-44c39900ba2b"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.227490 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-logs" (OuterVolumeSpecName: "logs") pod "09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" (UID: "09dd2891-14bd-4b67-a7d8-26d74fcaa6a3"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.229761 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e78be7f2-60d4-4f0e-a510-bf5e652110d1-logs" (OuterVolumeSpecName: "logs") pod "e78be7f2-60d4-4f0e-a510-bf5e652110d1" (UID: "e78be7f2-60d4-4f0e-a510-bf5e652110d1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.248952 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e78be7f2-60d4-4f0e-a510-bf5e652110d1-kube-api-access-fxljp" (OuterVolumeSpecName: "kube-api-access-fxljp") pod "e78be7f2-60d4-4f0e-a510-bf5e652110d1" (UID: "e78be7f2-60d4-4f0e-a510-bf5e652110d1"). InnerVolumeSpecName "kube-api-access-fxljp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.249593 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "bd9ed061-0329-42e0-8cca-e7b560c7a19c" (UID: "bd9ed061-0329-42e0-8cca-e7b560c7a19c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.249692 4861 scope.go:117] "RemoveContainer" containerID="114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194" Jan 29 06:58:40 crc kubenswrapper[4861]: E0129 06:58:40.250591 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194\": container with ID starting with 114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194 not found: ID does not exist" containerID="114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.250761 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194"} err="failed to get container status \"114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194\": rpc error: code = NotFound desc = could not find container \"114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194\": container with ID starting with 114ba4c7f55f00d4b44699f5820595d603ec581bd8cebccbfd05f5963c392194 not found: ID does not exist" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.250870 4861 scope.go:117] "RemoveContainer" containerID="d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea" Jan 29 06:58:40 crc kubenswrapper[4861]: E0129 06:58:40.251352 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea\": container with ID starting with d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea not found: ID does not exist" containerID="d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.251377 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10b22efc-707a-4ffc-8edc-44c39900ba2b-kube-api-access-2bvfs" (OuterVolumeSpecName: "kube-api-access-2bvfs") pod 
"10b22efc-707a-4ffc-8edc-44c39900ba2b" (UID: "10b22efc-707a-4ffc-8edc-44c39900ba2b"). InnerVolumeSpecName "kube-api-access-2bvfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.251397 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea"} err="failed to get container status \"d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea\": rpc error: code = NotFound desc = could not find container \"d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea\": container with ID starting with d36835ff2cd8a54569d1f967abe296e718bb927d8687570e53e239739e70ddea not found: ID does not exist" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.251429 4861 scope.go:117] "RemoveContainer" containerID="a95782607794ffb075c46075902d651f0f6db3732fc62ef57c3c4e66ef00c4f4" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.259474 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd9ed061-0329-42e0-8cca-e7b560c7a19c-kube-api-access-gclbw" (OuterVolumeSpecName: "kube-api-access-gclbw") pod "bd9ed061-0329-42e0-8cca-e7b560c7a19c" (UID: "bd9ed061-0329-42e0-8cca-e7b560c7a19c"). InnerVolumeSpecName "kube-api-access-gclbw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.282037 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-kube-api-access-dhn6r" (OuterVolumeSpecName: "kube-api-access-dhn6r") pod "09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" (UID: "09dd2891-14bd-4b67-a7d8-26d74fcaa6a3"). InnerVolumeSpecName "kube-api-access-dhn6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.286452 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" (UID: "09dd2891-14bd-4b67-a7d8-26d74fcaa6a3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.311904 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.315247 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-scripts" (OuterVolumeSpecName: "scripts") pod "10b22efc-707a-4ffc-8edc-44c39900ba2b" (UID: "10b22efc-707a-4ffc-8edc-44c39900ba2b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324374 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxljp\" (UniqueName: \"kubernetes.io/projected/e78be7f2-60d4-4f0e-a510-bf5e652110d1-kube-api-access-fxljp\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324400 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324433 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e78be7f2-60d4-4f0e-a510-bf5e652110d1-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324442 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324451 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324482 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324514 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bvfs\" (UniqueName: \"kubernetes.io/projected/10b22efc-707a-4ffc-8edc-44c39900ba2b-kube-api-access-2bvfs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324528 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9ed061-0329-42e0-8cca-e7b560c7a19c-logs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324537 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhn6r\" (UniqueName: \"kubernetes.io/projected/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-kube-api-access-dhn6r\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324545 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10b22efc-707a-4ffc-8edc-44c39900ba2b-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324554 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.324565 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gclbw\" (UniqueName: \"kubernetes.io/projected/bd9ed061-0329-42e0-8cca-e7b560c7a19c-kube-api-access-gclbw\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.328382 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e78be7f2-60d4-4f0e-a510-bf5e652110d1" (UID: 
"e78be7f2-60d4-4f0e-a510-bf5e652110d1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.370850 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.375000 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-config-data" (OuterVolumeSpecName: "config-data") pod "e78be7f2-60d4-4f0e-a510-bf5e652110d1" (UID: "e78be7f2-60d4-4f0e-a510-bf5e652110d1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.380871 4861 scope.go:117] "RemoveContainer" containerID="9a86c0075a1b39a77b81d03d33eaf3de19430f272ef69df32ef5227eb2cfdfbd" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.390765 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.418675 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-abe6-account-create-update-dkvp7"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.422818 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e78be7f2-60d4-4f0e-a510-bf5e652110d1" (UID: "e78be7f2-60d4-4f0e-a510-bf5e652110d1"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.428785 4861 scope.go:117] "RemoveContainer" containerID="4c604e0c72a77e1a59db1a8f51efca23cb07d1fdc756481cde528fd19e684c10" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.428788 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data" (OuterVolumeSpecName: "config-data") pod "09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" (UID: "09dd2891-14bd-4b67-a7d8-26d74fcaa6a3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.429561 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-abe6-account-create-update-dkvp7"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.430648 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.430670 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.430681 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.430691 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.430718 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.434161 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" (UID: "09dd2891-14bd-4b67-a7d8-26d74fcaa6a3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.436278 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data" (OuterVolumeSpecName: "config-data") pod "bd9ed061-0329-42e0-8cca-e7b560c7a19c" (UID: "bd9ed061-0329-42e0-8cca-e7b560c7a19c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.449134 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-98fb-account-create-update-jv6v4"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.450927 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-config-data" (OuterVolumeSpecName: "config-data") pod "10b22efc-707a-4ffc-8edc-44c39900ba2b" (UID: "10b22efc-707a-4ffc-8edc-44c39900ba2b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.454987 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-98fb-account-create-update-jv6v4"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.465574 4861 scope.go:117] "RemoveContainer" containerID="644d3d4ecb0360b4f4094c99e7ffd6babd80fb95a501022e6a4b47e201e4406b" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.476219 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10b22efc-707a-4ffc-8edc-44c39900ba2b" (UID: "10b22efc-707a-4ffc-8edc-44c39900ba2b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.486651 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-c008-account-create-update-cjb9h"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.490531 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd9ed061-0329-42e0-8cca-e7b560c7a19c" (UID: "bd9ed061-0329-42e0-8cca-e7b560c7a19c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.495204 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-c008-account-create-update-cjb9h"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.513241 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "10b22efc-707a-4ffc-8edc-44c39900ba2b" (UID: "10b22efc-707a-4ffc-8edc-44c39900ba2b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.515188 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.520577 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e78be7f2-60d4-4f0e-a510-bf5e652110d1" (UID: "e78be7f2-60d4-4f0e-a510-bf5e652110d1"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.523281 4861 scope.go:117] "RemoveContainer" containerID="64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.527252 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.532181 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.532206 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7qxx\" (UniqueName: \"kubernetes.io/projected/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-kube-api-access-k7qxx\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.532215 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.532225 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.532234 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e78be7f2-60d4-4f0e-a510-bf5e652110d1-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.532242 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10b22efc-707a-4ffc-8edc-44c39900ba2b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.532251 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.532259 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9ed061-0329-42e0-8cca-e7b560c7a19c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.532267 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.540536 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.545007 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.550110 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.560005 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.560066 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/placement-6957679874-pnq22"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.563930 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-6957679874-pnq22"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.568411 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-d8b667488-v7lmh"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.574856 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-d8b667488-v7lmh"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.589119 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.596630 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.601250 4861 scope.go:117] "RemoveContainer" containerID="403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.645646 4861 scope.go:117] "RemoveContainer" containerID="64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8" Jan 29 06:58:40 crc kubenswrapper[4861]: E0129 06:58:40.645951 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8\": container with ID starting with 64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8 not found: ID does not exist" containerID="64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.645980 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8"} err="failed to get container status \"64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8\": rpc error: code = NotFound desc = could not find container \"64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8\": container with ID starting with 64d7d1642a2ff88cc72ca6f7d545d2a8d3a1c6d0129d7942edd7fcc456ff17b8 not found: ID does not exist" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.645998 4861 scope.go:117] "RemoveContainer" containerID="403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b" Jan 29 06:58:40 crc kubenswrapper[4861]: E0129 06:58:40.646288 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b\": container with ID starting with 403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b not found: ID does not exist" containerID="403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.646363 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b"} err="failed to get container status \"403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b\": rpc error: code = NotFound desc = could not find container \"403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b\": container with ID starting with 403f4c83ab891508da53a0ee2a5357a9c2b581ecb039b58f5901fe7d8ad8e52b not found: ID does not exist" Jan 29 06:58:40 crc 
kubenswrapper[4861]: I0129 06:58:40.646377 4861 scope.go:117] "RemoveContainer" containerID="c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.666050 4861 scope.go:117] "RemoveContainer" containerID="1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.732085 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6b8d96575c-7zzfv" event={"ID":"09dd2891-14bd-4b67-a7d8-26d74fcaa6a3","Type":"ContainerDied","Data":"f80d75578861477ec8aac21ea801e295ac62635998a5c081df5e90b95cdd1e45"} Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.732144 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6b8d96575c-7zzfv" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.737581 4861 generic.go:334] "Generic (PLEG): container finished" podID="8c370e6a-40e9-4055-857e-c8357c904c8e" containerID="9c170b6b24190e8407f34d3e2aa2e80d167bb93c61c16add369b12444c54d78a" exitCode=0 Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.737638 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8c370e6a-40e9-4055-857e-c8357c904c8e","Type":"ContainerDied","Data":"9c170b6b24190e8407f34d3e2aa2e80d167bb93c61c16add369b12444c54d78a"} Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.744801 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10b22efc-707a-4ffc-8edc-44c39900ba2b","Type":"ContainerDied","Data":"7d02af97aadcaccb8482a8772034ffa20d2d4129ccc5be13e4f2477ade5e3cec"} Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.744828 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.756719 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" event={"ID":"bd9ed061-0329-42e0-8cca-e7b560c7a19c","Type":"ContainerDied","Data":"fceb21f2b2390152827167d4a2d6ba481059545e6ee770eebaf47b6ac1c3d8cd"} Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.756769 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-c8ddb67f8-pqqd9" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.770863 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e78be7f2-60d4-4f0e-a510-bf5e652110d1","Type":"ContainerDied","Data":"f60336bd1f36360535654dc36904d81898276cd075922ad637ffe7e1bc175fb6"} Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.770904 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.772866 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-5cwgv" event={"ID":"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7","Type":"ContainerStarted","Data":"2d8833279451f48074e5a5adadfe954088110ce54f5d9c3d627a45f367e30b92"} Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.790133 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-z5wvn" podUID="5b52afb6-32de-4f14-9663-adeec08b4fad" containerName="ovn-controller" probeResult="failure" output="command timed out" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.806695 4861 scope.go:117] "RemoveContainer" containerID="c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.806777 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: E0129 06:58:40.810157 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235\": container with ID starting with c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235 not found: ID does not exist" containerID="c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.810481 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235"} err="failed to get container status \"c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235\": rpc error: code = NotFound desc = could not find container \"c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235\": container with ID starting with c1f221c7fe46e12bcee728a52140203f52ee596b400af8bf186b52d94f77e235 not found: ID does not exist" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.810510 4861 scope.go:117] "RemoveContainer" containerID="1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483" Jan 29 06:58:40 crc kubenswrapper[4861]: E0129 06:58:40.811889 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483\": container with ID starting with 1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483 not found: ID does not exist" containerID="1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.811973 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483"} err="failed to get container status \"1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483\": rpc error: code = NotFound desc = could not find container \"1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483\": container with ID starting with 1098426a56ca153de5a6e9b13661ec02793e2e99e157c3974ce355446a125483 not found: ID does not exist" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.812005 4861 scope.go:117] "RemoveContainer" containerID="12d75e57461b87b2ec9d6d00c1b304c2545872ff7dcee032f19e27ff512c2516" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.814410 4861 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.844783 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-6b8d96575c-7zzfv"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.858003 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-6b8d96575c-7zzfv"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.863523 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.868169 4861 scope.go:117] "RemoveContainer" containerID="75481cbc7bf2e4776643277e100d6f7fcc456f612bd0f4c451db4c8198750b42" Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.868562 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.872763 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-c8ddb67f8-pqqd9"] Jan 29 06:58:40 crc kubenswrapper[4861]: I0129 06:58:40.877115 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-c8ddb67f8-pqqd9"] Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:40.943816 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:40.945212 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:40.947636 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:40.947659 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:40.947685 4861 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server" Jan 29 06:58:41 
crc kubenswrapper[4861]: E0129 06:58:40.954004 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:40.956743 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:40.957080 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovs-vswitchd" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:40.961551 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:40.970539 4861 scope.go:117] "RemoveContainer" containerID="9dd5b64d9ec144641a738b5aa4db658de3394d4fbf3ece8178a1881822e737bf" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.004693 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-z5wvn" podUID="5b52afb6-32de-4f14-9663-adeec08b4fad" containerName="ovn-controller" probeResult="failure" output=< Jan 29 06:58:41 crc kubenswrapper[4861]: ERROR - Failed to get connection status from ovn-controller, ovn-appctl exit status: 0 Jan 29 06:58:41 crc kubenswrapper[4861]: > Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.049909 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"8c370e6a-40e9-4055-857e-c8357c904c8e\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.050050 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-default\") pod \"8c370e6a-40e9-4055-857e-c8357c904c8e\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.050147 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-galera-tls-certs\") pod \"8c370e6a-40e9-4055-857e-c8357c904c8e\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.051212 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "8c370e6a-40e9-4055-857e-c8357c904c8e" (UID: "8c370e6a-40e9-4055-857e-c8357c904c8e"). InnerVolumeSpecName "config-data-default". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.051376 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-operator-scripts\") pod \"8c370e6a-40e9-4055-857e-c8357c904c8e\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.051474 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-generated\") pod \"8c370e6a-40e9-4055-857e-c8357c904c8e\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.051545 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-combined-ca-bundle\") pod \"8c370e6a-40e9-4055-857e-c8357c904c8e\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.051619 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nk478\" (UniqueName: \"kubernetes.io/projected/8c370e6a-40e9-4055-857e-c8357c904c8e-kube-api-access-nk478\") pod \"8c370e6a-40e9-4055-857e-c8357c904c8e\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.051655 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-kolla-config\") pod \"8c370e6a-40e9-4055-857e-c8357c904c8e\" (UID: \"8c370e6a-40e9-4055-857e-c8357c904c8e\") " Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.052119 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.052640 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "8c370e6a-40e9-4055-857e-c8357c904c8e" (UID: "8c370e6a-40e9-4055-857e-c8357c904c8e"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.053336 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8c370e6a-40e9-4055-857e-c8357c904c8e" (UID: "8c370e6a-40e9-4055-857e-c8357c904c8e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.054418 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "8c370e6a-40e9-4055-857e-c8357c904c8e" (UID: "8c370e6a-40e9-4055-857e-c8357c904c8e"). InnerVolumeSpecName "kolla-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.060218 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "mysql-db") pod "8c370e6a-40e9-4055-857e-c8357c904c8e" (UID: "8c370e6a-40e9-4055-857e-c8357c904c8e"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.076426 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8c370e6a-40e9-4055-857e-c8357c904c8e" (UID: "8c370e6a-40e9-4055-857e-c8357c904c8e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.084283 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c370e6a-40e9-4055-857e-c8357c904c8e-kube-api-access-nk478" (OuterVolumeSpecName: "kube-api-access-nk478") pod "8c370e6a-40e9-4055-857e-c8357c904c8e" (UID: "8c370e6a-40e9-4055-857e-c8357c904c8e"). InnerVolumeSpecName "kube-api-access-nk478". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.093681 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-5cwgv" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.095123 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "8c370e6a-40e9-4055-857e-c8357c904c8e" (UID: "8c370e6a-40e9-4055-857e-c8357c904c8e"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.137096 4861 scope.go:117] "RemoveContainer" containerID="f4608109881c8d879d0747a15002faa7be33fa42a0ab54b3b737788b5adb25d7" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.156399 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08bf60bc-3e57-4db1-8d7f-9c82ec4b5310" path="/var/lib/kubelet/pods/08bf60bc-3e57-4db1-8d7f-9c82ec4b5310/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.156865 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" path="/var/lib/kubelet/pods/09dd2891-14bd-4b67-a7d8-26d74fcaa6a3/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.157537 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10b22efc-707a-4ffc-8edc-44c39900ba2b" path="/var/lib/kubelet/pods/10b22efc-707a-4ffc-8edc-44c39900ba2b/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.158401 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzdk9\" (UniqueName: \"kubernetes.io/projected/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-kube-api-access-dzdk9\") pod \"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7\" (UID: \"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7\") " Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.158560 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts\") pod \"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7\" (UID: \"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7\") " Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.160383 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nk478\" (UniqueName: \"kubernetes.io/projected/8c370e6a-40e9-4055-857e-c8357c904c8e-kube-api-access-nk478\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.160415 4861 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.160450 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.160468 4861 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.160487 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c370e6a-40e9-4055-857e-c8357c904c8e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.160505 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8c370e6a-40e9-4055-857e-c8357c904c8e-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.160521 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/8c370e6a-40e9-4055-857e-c8357c904c8e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.161371 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f95675a-f692-4c29-90da-01eda11003ac" path="/var/lib/kubelet/pods/1f95675a-f692-4c29-90da-01eda11003ac/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.161878 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "50ce05f5-34ea-4c94-ad92-9b458fa5c3c7" (UID: "50ce05f5-34ea-4c94-ad92-9b458fa5c3c7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.162942 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34dfd085-c2bc-4fa4-a950-7df85c48fec0" path="/var/lib/kubelet/pods/34dfd085-c2bc-4fa4-a950-7df85c48fec0/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.163807 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3949b327-31a1-4dfa-bc04-e13b6c033ecd" path="/var/lib/kubelet/pods/3949b327-31a1-4dfa-bc04-e13b6c033ecd/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.164217 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="441b7714-dd72-448b-a5a8-b9f56057da43" path="/var/lib/kubelet/pods/441b7714-dd72-448b-a5a8-b9f56057da43/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.164569 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b488de3-67a5-49cf-a61a-37a44acbbe19" path="/var/lib/kubelet/pods/4b488de3-67a5-49cf-a61a-37a44acbbe19/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.165592 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ce76094-c71f-46c7-a69d-7d30d8540c5a" path="/var/lib/kubelet/pods/5ce76094-c71f-46c7-a69d-7d30d8540c5a/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.166151 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66a7721f-92e9-498c-97b8-cbe9890220d9" path="/var/lib/kubelet/pods/66a7721f-92e9-498c-97b8-cbe9890220d9/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.166530 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c25bacb-4105-4fa4-a798-117f9cbe75fe" path="/var/lib/kubelet/pods/6c25bacb-4105-4fa4-a798-117f9cbe75fe/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.167219 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-kube-api-access-dzdk9" (OuterVolumeSpecName: "kube-api-access-dzdk9") pod "50ce05f5-34ea-4c94-ad92-9b458fa5c3c7" (UID: "50ce05f5-34ea-4c94-ad92-9b458fa5c3c7"). InnerVolumeSpecName "kube-api-access-dzdk9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.167295 4861 scope.go:117] "RemoveContainer" containerID="c312a62932c0664c9e8bde4d291603923f2e2f621322670c3abe137b105616db" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.168053 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="947c222c-8f0c-423f-84e8-75a4b9322829" path="/var/lib/kubelet/pods/947c222c-8f0c-423f-84e8-75a4b9322829/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.168592 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="953c17ea-50f0-4111-8bc1-16819c1bce47" path="/var/lib/kubelet/pods/953c17ea-50f0-4111-8bc1-16819c1bce47/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.169019 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7d75aa6-1d18-4901-874c-e6b9db142421" path="/var/lib/kubelet/pods/b7d75aa6-1d18-4901-874c-e6b9db142421/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.169912 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd9ed061-0329-42e0-8cca-e7b560c7a19c" path="/var/lib/kubelet/pods/bd9ed061-0329-42e0-8cca-e7b560c7a19c/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.170567 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d72b59e5-64c2-4eab-955e-89d6298e834e" path="/var/lib/kubelet/pods/d72b59e5-64c2-4eab-955e-89d6298e834e/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.171225 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" path="/var/lib/kubelet/pods/e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.172148 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" path="/var/lib/kubelet/pods/e78be7f2-60d4-4f0e-a510-bf5e652110d1/volumes" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.194664 4861 scope.go:117] "RemoveContainer" containerID="e098f103ebd71969e2f8e2fd838a304a15e40397ed8a4eba94af458e0afc7a28" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.197307 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.213445 4861 scope.go:117] "RemoveContainer" containerID="d3f897c89c2801586a375c91d0d6297c2d965784611ff0abf1834bdaf78b6197" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.239000 4861 scope.go:117] "RemoveContainer" containerID="0376f68adc682a637da6dfee7b2c102f83dc2d0b2a50def1ae4ad71bb1486b5e" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.261776 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzdk9\" (UniqueName: \"kubernetes.io/projected/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-kube-api-access-dzdk9\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.261804 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.261814 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 
29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:41.464672 4861 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:41.464801 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data podName:3b8b1385-123a-4b60-af39-82d6492a65c2 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:49.464757119 +0000 UTC m=+1421.136251686 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data") pod "rabbitmq-cell1-server-0" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2") : configmap "rabbitmq-cell1-config-data" not found Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.538424 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.206:3000/\": dial tcp 10.217.0.206:3000: connect: connection refused" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.797124 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-5cwgv" event={"ID":"50ce05f5-34ea-4c94-ad92-9b458fa5c3c7","Type":"ContainerDied","Data":"2d8833279451f48074e5a5adadfe954088110ce54f5d9c3d627a45f367e30b92"} Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.797585 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-5cwgv" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.801320 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_93a9df75-0ea9-457b-84f0-17b95d5dcced/ovn-northd/0.log" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.801371 4861 generic.go:334] "Generic (PLEG): container finished" podID="93a9df75-0ea9-457b-84f0-17b95d5dcced" containerID="82d4301c7f8e1b6d25f3d60567395a1ff2635c17934b75e63917065ada770d83" exitCode=139 Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.801431 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"93a9df75-0ea9-457b-84f0-17b95d5dcced","Type":"ContainerDied","Data":"82d4301c7f8e1b6d25f3d60567395a1ff2635c17934b75e63917065ada770d83"} Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.801461 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"93a9df75-0ea9-457b-84f0-17b95d5dcced","Type":"ContainerDied","Data":"162f15ef0121a63ab7849141694da0a5105505ef7dd8d76872e40ac19151c918"} Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.801472 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="162f15ef0121a63ab7849141694da0a5105505ef7dd8d76872e40ac19151c918" Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.803467 4861 generic.go:334] "Generic (PLEG): container finished" podID="3b8b1385-123a-4b60-af39-82d6492a65c2" containerID="df85d7b79b6e3d17ea7765b219c520d147b988eede5ce5119c6fe36e62177544" exitCode=0 Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.803579 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b8b1385-123a-4b60-af39-82d6492a65c2","Type":"ContainerDied","Data":"df85d7b79b6e3d17ea7765b219c520d147b988eede5ce5119c6fe36e62177544"} Jan 29 06:58:41 crc 
kubenswrapper[4861]: I0129 06:58:41.821740 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.821518 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8c370e6a-40e9-4055-857e-c8357c904c8e","Type":"ContainerDied","Data":"d319f43b0a3b9bbe7170779bb9517e16cd65501660e2e82f024feec0415781ba"}
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.823247 4861 scope.go:117] "RemoveContainer" containerID="9c170b6b24190e8407f34d3e2aa2e80d167bb93c61c16add369b12444c54d78a"
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.824938 4861 generic.go:334] "Generic (PLEG): container finished" podID="27188e95-6192-4569-b254-c1e2d9b28086" containerID="9d22b040aef5f1212a99cca021391b8a04401e06ef3cba31d31cf1356747f059" exitCode=0
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.825024 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b7fd56548-f8c7z" event={"ID":"27188e95-6192-4569-b254-c1e2d9b28086","Type":"ContainerDied","Data":"9d22b040aef5f1212a99cca021391b8a04401e06ef3cba31d31cf1356747f059"}
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.863838 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_93a9df75-0ea9-457b-84f0-17b95d5dcced/ovn-northd/0.log"
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.863901 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.880482 4861 scope.go:117] "RemoveContainer" containerID="4c1ce7cacae4207060ac4f5331c4d327e43e77b67b6c58c3045038a6a4ddde7c"
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.880635 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b7fd56548-f8c7z"
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.880705 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"]
Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:41.893922 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.896607 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"]
Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:41.910354 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.914522 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-5cwgv"]
Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:41.920922 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Jan 29 06:58:41 crc kubenswrapper[4861]: E0129 06:58:41.921002 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="c749a121-7e8e-4d49-8a30-c27fa21926b5" containerName="nova-cell1-conductor-conductor"
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.921450 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-5cwgv"]
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974525 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8q9w\" (UniqueName: \"kubernetes.io/projected/27188e95-6192-4569-b254-c1e2d9b28086-kube-api-access-z8q9w\") pod \"27188e95-6192-4569-b254-c1e2d9b28086\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974572 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-config-data\") pod \"27188e95-6192-4569-b254-c1e2d9b28086\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974623 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64dvp\" (UniqueName: \"kubernetes.io/projected/93a9df75-0ea9-457b-84f0-17b95d5dcced-kube-api-access-64dvp\") pod \"93a9df75-0ea9-457b-84f0-17b95d5dcced\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974643 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-scripts\") pod \"27188e95-6192-4569-b254-c1e2d9b28086\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974675 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-fernet-keys\") pod \"27188e95-6192-4569-b254-c1e2d9b28086\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974693 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-credential-keys\") pod \"27188e95-6192-4569-b254-c1e2d9b28086\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974715 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-scripts\") pod \"93a9df75-0ea9-457b-84f0-17b95d5dcced\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974740 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-metrics-certs-tls-certs\") pod \"93a9df75-0ea9-457b-84f0-17b95d5dcced\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974776 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-combined-ca-bundle\") pod \"93a9df75-0ea9-457b-84f0-17b95d5dcced\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974904 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-rundir\") pod \"93a9df75-0ea9-457b-84f0-17b95d5dcced\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974933 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-combined-ca-bundle\") pod \"27188e95-6192-4569-b254-c1e2d9b28086\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974962 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-public-tls-certs\") pod \"27188e95-6192-4569-b254-c1e2d9b28086\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.974989 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-internal-tls-certs\") pod \"27188e95-6192-4569-b254-c1e2d9b28086\" (UID: \"27188e95-6192-4569-b254-c1e2d9b28086\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.975009 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-northd-tls-certs\") pod \"93a9df75-0ea9-457b-84f0-17b95d5dcced\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.975044 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-config\") pod \"93a9df75-0ea9-457b-84f0-17b95d5dcced\" (UID: \"93a9df75-0ea9-457b-84f0-17b95d5dcced\") "
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.976047 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-config" (OuterVolumeSpecName: "config") pod "93a9df75-0ea9-457b-84f0-17b95d5dcced" (UID: "93a9df75-0ea9-457b-84f0-17b95d5dcced"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.976208 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-scripts" (OuterVolumeSpecName: "scripts") pod "93a9df75-0ea9-457b-84f0-17b95d5dcced" (UID: "93a9df75-0ea9-457b-84f0-17b95d5dcced"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.979034 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "93a9df75-0ea9-457b-84f0-17b95d5dcced" (UID: "93a9df75-0ea9-457b-84f0-17b95d5dcced"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.981231 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-scripts" (OuterVolumeSpecName: "scripts") pod "27188e95-6192-4569-b254-c1e2d9b28086" (UID: "27188e95-6192-4569-b254-c1e2d9b28086"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.987038 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27188e95-6192-4569-b254-c1e2d9b28086-kube-api-access-z8q9w" (OuterVolumeSpecName: "kube-api-access-z8q9w") pod "27188e95-6192-4569-b254-c1e2d9b28086" (UID: "27188e95-6192-4569-b254-c1e2d9b28086"). InnerVolumeSpecName "kube-api-access-z8q9w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.990282 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93a9df75-0ea9-457b-84f0-17b95d5dcced-kube-api-access-64dvp" (OuterVolumeSpecName: "kube-api-access-64dvp") pod "93a9df75-0ea9-457b-84f0-17b95d5dcced" (UID: "93a9df75-0ea9-457b-84f0-17b95d5dcced"). InnerVolumeSpecName "kube-api-access-64dvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:58:41 crc kubenswrapper[4861]: I0129 06:58:41.994778 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "27188e95-6192-4569-b254-c1e2d9b28086" (UID: "27188e95-6192-4569-b254-c1e2d9b28086"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.019809 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "27188e95-6192-4569-b254-c1e2d9b28086" (UID: "27188e95-6192-4569-b254-c1e2d9b28086"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.045063 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27188e95-6192-4569-b254-c1e2d9b28086" (UID: "27188e95-6192-4569-b254-c1e2d9b28086"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.052961 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93a9df75-0ea9-457b-84f0-17b95d5dcced" (UID: "93a9df75-0ea9-457b-84f0-17b95d5dcced"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.058717 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-config-data" (OuterVolumeSpecName: "config-data") pod "27188e95-6192-4569-b254-c1e2d9b28086" (UID: "27188e95-6192-4569-b254-c1e2d9b28086"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.058798 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "27188e95-6192-4569-b254-c1e2d9b28086" (UID: "27188e95-6192-4569-b254-c1e2d9b28086"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.063625 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "27188e95-6192-4569-b254-c1e2d9b28086" (UID: "27188e95-6192-4569-b254-c1e2d9b28086"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.075254 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "93a9df75-0ea9-457b-84f0-17b95d5dcced" (UID: "93a9df75-0ea9-457b-84f0-17b95d5dcced"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.081968 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-config\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082007 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8q9w\" (UniqueName: \"kubernetes.io/projected/27188e95-6192-4569-b254-c1e2d9b28086-kube-api-access-z8q9w\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082018 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082029 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64dvp\" (UniqueName: \"kubernetes.io/projected/93a9df75-0ea9-457b-84f0-17b95d5dcced-kube-api-access-64dvp\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082037 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082045 4861 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082052 4861 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082061 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93a9df75-0ea9-457b-84f0-17b95d5dcced-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082083 4861 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082091 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082098 4861 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-rundir\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082106 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082114 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.082122 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/27188e95-6192-4569-b254-c1e2d9b28086-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.085619 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.110641 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "93a9df75-0ea9-457b-84f0-17b95d5dcced" (UID: "93a9df75-0ea9-457b-84f0-17b95d5dcced"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.182636 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-plugins-conf\") pod \"3b8b1385-123a-4b60-af39-82d6492a65c2\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.182689 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b8b1385-123a-4b60-af39-82d6492a65c2-pod-info\") pod \"3b8b1385-123a-4b60-af39-82d6492a65c2\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.182719 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b8b1385-123a-4b60-af39-82d6492a65c2-erlang-cookie-secret\") pod \"3b8b1385-123a-4b60-af39-82d6492a65c2\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.182757 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqc5q\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-kube-api-access-bqc5q\") pod \"3b8b1385-123a-4b60-af39-82d6492a65c2\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.182784 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-erlang-cookie\") pod \"3b8b1385-123a-4b60-af39-82d6492a65c2\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.182836 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-plugins\") pod \"3b8b1385-123a-4b60-af39-82d6492a65c2\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.182891 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"3b8b1385-123a-4b60-af39-82d6492a65c2\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.182923 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-tls\") pod \"3b8b1385-123a-4b60-af39-82d6492a65c2\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.182985 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data\") pod \"3b8b1385-123a-4b60-af39-82d6492a65c2\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.183000 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-confd\") pod \"3b8b1385-123a-4b60-af39-82d6492a65c2\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.183032 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-server-conf\") pod \"3b8b1385-123a-4b60-af39-82d6492a65c2\" (UID: \"3b8b1385-123a-4b60-af39-82d6492a65c2\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.183283 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "3b8b1385-123a-4b60-af39-82d6492a65c2" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.183484 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "3b8b1385-123a-4b60-af39-82d6492a65c2" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.183776 4861 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.183796 4861 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/93a9df75-0ea9-457b-84f0-17b95d5dcced-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.183807 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.184105 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "3b8b1385-123a-4b60-af39-82d6492a65c2" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.185321 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/3b8b1385-123a-4b60-af39-82d6492a65c2-pod-info" (OuterVolumeSpecName: "pod-info") pod "3b8b1385-123a-4b60-af39-82d6492a65c2" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.186415 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "persistence") pod "3b8b1385-123a-4b60-af39-82d6492a65c2" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.187708 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "3b8b1385-123a-4b60-af39-82d6492a65c2" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.188997 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-kube-api-access-bqc5q" (OuterVolumeSpecName: "kube-api-access-bqc5q") pod "3b8b1385-123a-4b60-af39-82d6492a65c2" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2"). InnerVolumeSpecName "kube-api-access-bqc5q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.189250 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b8b1385-123a-4b60-af39-82d6492a65c2-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "3b8b1385-123a-4b60-af39-82d6492a65c2" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.200969 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data" (OuterVolumeSpecName: "config-data") pod "3b8b1385-123a-4b60-af39-82d6492a65c2" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.225432 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-server-conf" (OuterVolumeSpecName: "server-conf") pod "3b8b1385-123a-4b60-af39-82d6492a65c2" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.276481 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "3b8b1385-123a-4b60-af39-82d6492a65c2" (UID: "3b8b1385-123a-4b60-af39-82d6492a65c2"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.285503 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqc5q\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-kube-api-access-bqc5q\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.285532 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.285558 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.285567 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.285576 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.285584 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b8b1385-123a-4b60-af39-82d6492a65c2-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.285593 4861 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b8b1385-123a-4b60-af39-82d6492a65c2-server-conf\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.285602 4861 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b8b1385-123a-4b60-af39-82d6492a65c2-pod-info\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.285610 4861 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b8b1385-123a-4b60-af39-82d6492a65c2-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.306560 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.360215 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.386535 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: E0129 06:58:42.386643 4861 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Jan 29 06:58:42 crc kubenswrapper[4861]: E0129 06:58:42.386980 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data podName:5966cedc-8ab5-4390-906b-c5ac39333e09 nodeName:}" failed. No retries permitted until 2026-01-29 06:58:50.386959629 +0000 UTC m=+1422.058454206 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data") pod "rabbitmq-server-0" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09") : configmap "rabbitmq-config-data" not found
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.488393 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-erlang-cookie\") pod \"5966cedc-8ab5-4390-906b-c5ac39333e09\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.488557 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvspp\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-kube-api-access-mvspp\") pod \"5966cedc-8ab5-4390-906b-c5ac39333e09\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.488623 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-confd\") pod \"5966cedc-8ab5-4390-906b-c5ac39333e09\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.488709 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"5966cedc-8ab5-4390-906b-c5ac39333e09\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.488778 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5966cedc-8ab5-4390-906b-c5ac39333e09-erlang-cookie-secret\") pod \"5966cedc-8ab5-4390-906b-c5ac39333e09\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.488823 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-tls\") pod \"5966cedc-8ab5-4390-906b-c5ac39333e09\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.488875 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data\") pod \"5966cedc-8ab5-4390-906b-c5ac39333e09\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.488966 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-server-conf\") pod \"5966cedc-8ab5-4390-906b-c5ac39333e09\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.489010 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-plugins-conf\") pod \"5966cedc-8ab5-4390-906b-c5ac39333e09\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.489103 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-plugins\") pod \"5966cedc-8ab5-4390-906b-c5ac39333e09\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.489228 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5966cedc-8ab5-4390-906b-c5ac39333e09-pod-info\") pod \"5966cedc-8ab5-4390-906b-c5ac39333e09\" (UID: \"5966cedc-8ab5-4390-906b-c5ac39333e09\") "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.489601 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "5966cedc-8ab5-4390-906b-c5ac39333e09" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.489978 4861 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.491652 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "5966cedc-8ab5-4390-906b-c5ac39333e09" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.492193 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "5966cedc-8ab5-4390-906b-c5ac39333e09" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.492211 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "5966cedc-8ab5-4390-906b-c5ac39333e09" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.494364 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "5966cedc-8ab5-4390-906b-c5ac39333e09" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.495285 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-kube-api-access-mvspp" (OuterVolumeSpecName: "kube-api-access-mvspp") pod "5966cedc-8ab5-4390-906b-c5ac39333e09" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09"). InnerVolumeSpecName "kube-api-access-mvspp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.497355 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5966cedc-8ab5-4390-906b-c5ac39333e09-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "5966cedc-8ab5-4390-906b-c5ac39333e09" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.497452 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/5966cedc-8ab5-4390-906b-c5ac39333e09-pod-info" (OuterVolumeSpecName: "pod-info") pod "5966cedc-8ab5-4390-906b-c5ac39333e09" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.527030 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data" (OuterVolumeSpecName: "config-data") pod "5966cedc-8ab5-4390-906b-c5ac39333e09" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.546108 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-server-conf" (OuterVolumeSpecName: "server-conf") pod "5966cedc-8ab5-4390-906b-c5ac39333e09" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: E0129 06:58:42.575696 4861 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=<
Jan 29 06:58:42 crc kubenswrapper[4861]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2026-01-29T06:58:35Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Jan 29 06:58:42 crc kubenswrapper[4861]: /etc/init.d/functions: line 589: 393 Alarm clock "$@"
Jan 29 06:58:42 crc kubenswrapper[4861]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-z5wvn" message=<
Jan 29 06:58:42 crc kubenswrapper[4861]: Exiting ovn-controller (1) [FAILED]
Jan 29 06:58:42 crc kubenswrapper[4861]: Killing ovn-controller (1) [ OK ]
Jan 29 06:58:42 crc kubenswrapper[4861]: Killing ovn-controller (1) with SIGKILL [ OK ]
Jan 29 06:58:42 crc kubenswrapper[4861]: 2026-01-29T06:58:35Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Jan 29 06:58:42 crc kubenswrapper[4861]: /etc/init.d/functions: line 589: 393 Alarm clock "$@"
Jan 29 06:58:42 crc kubenswrapper[4861]: >
Jan 29 06:58:42 crc kubenswrapper[4861]: E0129 06:58:42.575735 4861 kuberuntime_container.go:691] "PreStop hook failed" err=<
Jan 29 06:58:42 crc kubenswrapper[4861]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2026-01-29T06:58:35Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Jan 29 06:58:42 crc kubenswrapper[4861]: /etc/init.d/functions: line 589: 393 Alarm clock "$@"
Jan 29 06:58:42 crc kubenswrapper[4861]: > pod="openstack/ovn-controller-z5wvn" podUID="5b52afb6-32de-4f14-9663-adeec08b4fad" containerName="ovn-controller" containerID="cri-o://43ed60341599191e6dc9eb1bb3d03dd08460301dbced2c46cda3df1d5546241f"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.575782 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-z5wvn" podUID="5b52afb6-32de-4f14-9663-adeec08b4fad" containerName="ovn-controller" containerID="cri-o://43ed60341599191e6dc9eb1bb3d03dd08460301dbced2c46cda3df1d5546241f" gracePeriod=22
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.591224 4861 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5966cedc-8ab5-4390-906b-c5ac39333e09-pod-info\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.591257 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.591271 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvspp\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-kube-api-access-mvspp\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.591292 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" "
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.591301 4861 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5966cedc-8ab5-4390-906b-c5ac39333e09-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.591309 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.591317 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.591326 4861 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5966cedc-8ab5-4390-906b-c5ac39333e09-server-conf\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.591338 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.602799 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "5966cedc-8ab5-4390-906b-c5ac39333e09" (UID: "5966cedc-8ab5-4390-906b-c5ac39333e09"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.606156 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.693721 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5966cedc-8ab5-4390-906b-c5ac39333e09-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.693778 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.835001 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b8b1385-123a-4b60-af39-82d6492a65c2","Type":"ContainerDied","Data":"67a1475dfbada9b85e2a0e8ee90f3cf79235d0ae9ebb592aa8e44eea88f5924a"}
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.835051 4861 scope.go:117] "RemoveContainer" containerID="df85d7b79b6e3d17ea7765b219c520d147b988eede5ce5119c6fe36e62177544"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.835158 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.839136 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b7fd56548-f8c7z" event={"ID":"27188e95-6192-4569-b254-c1e2d9b28086","Type":"ContainerDied","Data":"5284c41c7efbbae3ed2c2474677c06318dba9f788a1f7ce53235f393ed83b41e"}
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.839218 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b7fd56548-f8c7z"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.843199 4861 generic.go:334] "Generic (PLEG): container finished" podID="5966cedc-8ab5-4390-906b-c5ac39333e09" containerID="b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777" exitCode=0
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.843282 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5966cedc-8ab5-4390-906b-c5ac39333e09","Type":"ContainerDied","Data":"b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777"}
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.843330 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5966cedc-8ab5-4390-906b-c5ac39333e09","Type":"ContainerDied","Data":"f7185532535e4d49a2dc75b14a2352f7b4e01086e0368adf56f37bf0e4b29b31"}
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.843415 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.847391 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-z5wvn_5b52afb6-32de-4f14-9663-adeec08b4fad/ovn-controller/0.log"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.847439 4861 generic.go:334] "Generic (PLEG): container finished" podID="5b52afb6-32de-4f14-9663-adeec08b4fad" containerID="43ed60341599191e6dc9eb1bb3d03dd08460301dbced2c46cda3df1d5546241f" exitCode=137
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.847516 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.849189 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-z5wvn" event={"ID":"5b52afb6-32de-4f14-9663-adeec08b4fad","Type":"ContainerDied","Data":"43ed60341599191e6dc9eb1bb3d03dd08460301dbced2c46cda3df1d5546241f"}
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.869419 4861 scope.go:117] "RemoveContainer" containerID="2808f05e16652e14e080ca41ff8920b6abdf36850a94e302e37f9f0b96a4b421"
Jan 29 06:58:42 crc kubenswrapper[4861]: E0129 06:58:42.903480 4861 secret.go:188] Couldn't get secret openstack/neutron-httpd-config: secret "neutron-httpd-config" not found
Jan 29 06:58:42 crc kubenswrapper[4861]: E0129 06:58:42.903558 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config podName:ce4279f2-eded-42d5-9353-5235a6b7d64e nodeName:}" failed. No retries permitted until 2026-01-29 06:58:50.903536047 +0000 UTC m=+1422.575030604 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "httpd-config" (UniqueName: "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config") pod "neutron-7f868dbfb9-5bdsk" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e") : secret "neutron-httpd-config" not found
Jan 29 06:58:42 crc kubenswrapper[4861]: E0129 06:58:42.904498 4861 secret.go:188] Couldn't get secret openstack/neutron-config: secret "neutron-config" not found
Jan 29 06:58:42 crc kubenswrapper[4861]: E0129 06:58:42.904541 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config podName:ce4279f2-eded-42d5-9353-5235a6b7d64e nodeName:}" failed. No retries permitted until 2026-01-29 06:58:50.904531031 +0000 UTC m=+1422.576025578 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config") pod "neutron-7f868dbfb9-5bdsk" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e") : secret "neutron-config" not found
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.938100 4861 scope.go:117] "RemoveContainer" containerID="9d22b040aef5f1212a99cca021391b8a04401e06ef3cba31d31cf1356747f059"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.941881 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5b7fd56548-f8c7z"]
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.945016 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-z5wvn_5b52afb6-32de-4f14-9663-adeec08b4fad/ovn-controller/0.log"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.945143 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-z5wvn"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.949765 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-5b7fd56548-f8c7z"]
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.969147 4861 scope.go:117] "RemoveContainer" containerID="b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777"
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.980753 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 06:58:42 crc kubenswrapper[4861]: I0129 06:58:42.985503 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.004273 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shqvv\" (UniqueName: \"kubernetes.io/projected/5b52afb6-32de-4f14-9663-adeec08b4fad-kube-api-access-shqvv\") pod \"5b52afb6-32de-4f14-9663-adeec08b4fad\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") "
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.004343 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b52afb6-32de-4f14-9663-adeec08b4fad-scripts\") pod \"5b52afb6-32de-4f14-9663-adeec08b4fad\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") "
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.004380 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run\") pod \"5b52afb6-32de-4f14-9663-adeec08b4fad\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") "
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.004450 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-log-ovn\") pod \"5b52afb6-32de-4f14-9663-adeec08b4fad\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") "
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.004493 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run-ovn\") pod \"5b52afb6-32de-4f14-9663-adeec08b4fad\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") "
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.004514 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-combined-ca-bundle\") pod \"5b52afb6-32de-4f14-9663-adeec08b4fad\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") "
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.004549 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-ovn-controller-tls-certs\") pod \"5b52afb6-32de-4f14-9663-adeec08b4fad\" (UID: \"5b52afb6-32de-4f14-9663-adeec08b4fad\") "
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.005125 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "5b52afb6-32de-4f14-9663-adeec08b4fad" (UID: "5b52afb6-32de-4f14-9663-adeec08b4fad"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.005993 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "5b52afb6-32de-4f14-9663-adeec08b4fad" (UID: "5b52afb6-32de-4f14-9663-adeec08b4fad"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.006013 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run" (OuterVolumeSpecName: "var-run") pod "5b52afb6-32de-4f14-9663-adeec08b4fad" (UID: "5b52afb6-32de-4f14-9663-adeec08b4fad"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.006027 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b52afb6-32de-4f14-9663-adeec08b4fad-scripts" (OuterVolumeSpecName: "scripts") pod "5b52afb6-32de-4f14-9663-adeec08b4fad" (UID: "5b52afb6-32de-4f14-9663-adeec08b4fad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.013047 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"]
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.020358 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"]
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.028242 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b52afb6-32de-4f14-9663-adeec08b4fad-kube-api-access-shqvv" (OuterVolumeSpecName: "kube-api-access-shqvv") pod "5b52afb6-32de-4f14-9663-adeec08b4fad" (UID: "5b52afb6-32de-4f14-9663-adeec08b4fad"). InnerVolumeSpecName "kube-api-access-shqvv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.029357 4861 scope.go:117] "RemoveContainer" containerID="093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.031462 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.036437 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b52afb6-32de-4f14-9663-adeec08b4fad" (UID: "5b52afb6-32de-4f14-9663-adeec08b4fad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.038347 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.086002 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "5b52afb6-32de-4f14-9663-adeec08b4fad" (UID: "5b52afb6-32de-4f14-9663-adeec08b4fad"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.086116 4861 scope.go:117] "RemoveContainer" containerID="b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777"
Jan 29 06:58:43 crc kubenswrapper[4861]: E0129 06:58:43.086585 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777\": container with ID starting with b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777 not found: ID does not exist" containerID="b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.086624 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777"} err="failed to get container status \"b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777\": rpc error: code = NotFound desc = could not find container \"b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777\": container with ID starting with b37fd0e09283ebf46efe622b9c7b60a87663532fee20b12e8f9d6436e9408777 not found: ID does not exist"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.086650 4861 scope.go:117] "RemoveContainer" containerID="093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6"
Jan 29 06:58:43 crc kubenswrapper[4861]: E0129 06:58:43.087083 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6\": container with ID starting with 093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6 not found: ID does not exist" containerID="093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.087105 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6"} err="failed to get container status \"093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6\": rpc error: code = NotFound desc = could not find container \"093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6\": container with ID starting with 093f011b33208139150ea9ac32a1050ee5f730ec29a444888c76e7e83ebc6aa6 not found: ID does not exist"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.105971 4861 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.106010 4861 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.106019 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.106029 4861 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b52afb6-32de-4f14-9663-adeec08b4fad-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.106039 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shqvv\" (UniqueName: \"kubernetes.io/projected/5b52afb6-32de-4f14-9663-adeec08b4fad-kube-api-access-shqvv\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.106047 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b52afb6-32de-4f14-9663-adeec08b4fad-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.106055 4861 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5b52afb6-32de-4f14-9663-adeec08b4fad-var-run\") on node \"crc\" DevicePath \"\""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.126528 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27188e95-6192-4569-b254-c1e2d9b28086" path="/var/lib/kubelet/pods/27188e95-6192-4569-b254-c1e2d9b28086/volumes"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.127404 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b8b1385-123a-4b60-af39-82d6492a65c2" path="/var/lib/kubelet/pods/3b8b1385-123a-4b60-af39-82d6492a65c2/volumes"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.127929 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50ce05f5-34ea-4c94-ad92-9b458fa5c3c7" path="/var/lib/kubelet/pods/50ce05f5-34ea-4c94-ad92-9b458fa5c3c7/volumes"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.128883 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5966cedc-8ab5-4390-906b-c5ac39333e09" path="/var/lib/kubelet/pods/5966cedc-8ab5-4390-906b-c5ac39333e09/volumes"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.129556 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c370e6a-40e9-4055-857e-c8357c904c8e" path="/var/lib/kubelet/pods/8c370e6a-40e9-4055-857e-c8357c904c8e/volumes"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.130641 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93a9df75-0ea9-457b-84f0-17b95d5dcced" path="/var/lib/kubelet/pods/93a9df75-0ea9-457b-84f0-17b95d5dcced/volumes"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.417209 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.511560 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-config-data\") pod \"c749a121-7e8e-4d49-8a30-c27fa21926b5\" (UID: \"c749a121-7e8e-4d49-8a30-c27fa21926b5\") "
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.511599 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhvz8\" (UniqueName: \"kubernetes.io/projected/c749a121-7e8e-4d49-8a30-c27fa21926b5-kube-api-access-rhvz8\") pod \"c749a121-7e8e-4d49-8a30-c27fa21926b5\" (UID: \"c749a121-7e8e-4d49-8a30-c27fa21926b5\") "
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.511684 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-combined-ca-bundle\") pod \"c749a121-7e8e-4d49-8a30-c27fa21926b5\" (UID: \"c749a121-7e8e-4d49-8a30-c27fa21926b5\") "
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.515757 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c749a121-7e8e-4d49-8a30-c27fa21926b5-kube-api-access-rhvz8" (OuterVolumeSpecName: "kube-api-access-rhvz8") pod "c749a121-7e8e-4d49-8a30-c27fa21926b5" (UID: "c749a121-7e8e-4d49-8a30-c27fa21926b5"). InnerVolumeSpecName "kube-api-access-rhvz8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.535682 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-config-data" (OuterVolumeSpecName: "config-data") pod "c749a121-7e8e-4d49-8a30-c27fa21926b5" (UID: "c749a121-7e8e-4d49-8a30-c27fa21926b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.552210 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.557194 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c749a121-7e8e-4d49-8a30-c27fa21926b5" (UID: "c749a121-7e8e-4d49-8a30-c27fa21926b5"). InnerVolumeSpecName "combined-ca-bundle".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.613481 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-log-httpd\") pod \"333e76bd-235e-4b74-a6c9-ce702309ec38\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.613519 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-run-httpd\") pod \"333e76bd-235e-4b74-a6c9-ce702309ec38\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.613549 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-config-data\") pod \"333e76bd-235e-4b74-a6c9-ce702309ec38\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.613594 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-combined-ca-bundle\") pod \"333e76bd-235e-4b74-a6c9-ce702309ec38\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.613613 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-ceilometer-tls-certs\") pod \"333e76bd-235e-4b74-a6c9-ce702309ec38\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.613635 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-scripts\") pod \"333e76bd-235e-4b74-a6c9-ce702309ec38\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.613657 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6qwp\" (UniqueName: \"kubernetes.io/projected/333e76bd-235e-4b74-a6c9-ce702309ec38-kube-api-access-v6qwp\") pod \"333e76bd-235e-4b74-a6c9-ce702309ec38\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.613724 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-sg-core-conf-yaml\") pod \"333e76bd-235e-4b74-a6c9-ce702309ec38\" (UID: \"333e76bd-235e-4b74-a6c9-ce702309ec38\") " Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.613846 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "333e76bd-235e-4b74-a6c9-ce702309ec38" (UID: "333e76bd-235e-4b74-a6c9-ce702309ec38"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.613974 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "333e76bd-235e-4b74-a6c9-ce702309ec38" (UID: "333e76bd-235e-4b74-a6c9-ce702309ec38"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.614201 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.614222 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhvz8\" (UniqueName: \"kubernetes.io/projected/c749a121-7e8e-4d49-8a30-c27fa21926b5-kube-api-access-rhvz8\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.614233 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c749a121-7e8e-4d49-8a30-c27fa21926b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.614242 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.614249 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/333e76bd-235e-4b74-a6c9-ce702309ec38-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.616843 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/333e76bd-235e-4b74-a6c9-ce702309ec38-kube-api-access-v6qwp" (OuterVolumeSpecName: "kube-api-access-v6qwp") pod "333e76bd-235e-4b74-a6c9-ce702309ec38" (UID: "333e76bd-235e-4b74-a6c9-ce702309ec38"). InnerVolumeSpecName "kube-api-access-v6qwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.617490 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-scripts" (OuterVolumeSpecName: "scripts") pod "333e76bd-235e-4b74-a6c9-ce702309ec38" (UID: "333e76bd-235e-4b74-a6c9-ce702309ec38"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.646291 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "333e76bd-235e-4b74-a6c9-ce702309ec38" (UID: "333e76bd-235e-4b74-a6c9-ce702309ec38"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.660226 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "333e76bd-235e-4b74-a6c9-ce702309ec38" (UID: "333e76bd-235e-4b74-a6c9-ce702309ec38"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.695847 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-config-data" (OuterVolumeSpecName: "config-data") pod "333e76bd-235e-4b74-a6c9-ce702309ec38" (UID: "333e76bd-235e-4b74-a6c9-ce702309ec38"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.698125 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "333e76bd-235e-4b74-a6c9-ce702309ec38" (UID: "333e76bd-235e-4b74-a6c9-ce702309ec38"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.717341 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.717369 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.717380 4861 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.717389 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.717399 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6qwp\" (UniqueName: \"kubernetes.io/projected/333e76bd-235e-4b74-a6c9-ce702309ec38-kube-api-access-v6qwp\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.717408 4861 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/333e76bd-235e-4b74-a6c9-ce702309ec38-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.860839 4861 generic.go:334] "Generic (PLEG): container finished" podID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerID="9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea" exitCode=0 Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.860907 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"333e76bd-235e-4b74-a6c9-ce702309ec38","Type":"ContainerDied","Data":"9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea"} Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.860939 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"333e76bd-235e-4b74-a6c9-ce702309ec38","Type":"ContainerDied","Data":"185e63fa8cf97b639bf0524658521d81e89c696f450caaa12871d4748e697b1f"} Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.860958 4861 scope.go:117] "RemoveContainer" 
containerID="d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.861115 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.870339 4861 generic.go:334] "Generic (PLEG): container finished" podID="c749a121-7e8e-4d49-8a30-c27fa21926b5" containerID="928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3" exitCode=0 Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.870501 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.870521 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"c749a121-7e8e-4d49-8a30-c27fa21926b5","Type":"ContainerDied","Data":"928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3"} Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.870888 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"c749a121-7e8e-4d49-8a30-c27fa21926b5","Type":"ContainerDied","Data":"7d9daaa12f60f919b4c72f557e4f3e4503ab8bdf76430f65c47a6e1becb7f4a7"} Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.876529 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-z5wvn_5b52afb6-32de-4f14-9663-adeec08b4fad/ovn-controller/0.log" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.876685 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-z5wvn" event={"ID":"5b52afb6-32de-4f14-9663-adeec08b4fad","Type":"ContainerDied","Data":"0b05369c08b4c72eb185ac082b775e15c7b7da6052ceb4cfe24070e9515aff73"} Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.876790 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-z5wvn" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.884066 4861 scope.go:117] "RemoveContainer" containerID="f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.923754 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-z5wvn"] Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.929973 4861 scope.go:117] "RemoveContainer" containerID="9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.937352 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-z5wvn"] Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.943443 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.950165 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.953096 4861 scope.go:117] "RemoveContainer" containerID="e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.956532 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.962682 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.979906 4861 scope.go:117] "RemoveContainer" containerID="d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337" Jan 29 06:58:43 crc kubenswrapper[4861]: E0129 06:58:43.980422 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337\": container with ID starting with d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337 not found: ID does not exist" containerID="d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.980467 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337"} err="failed to get container status \"d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337\": rpc error: code = NotFound desc = could not find container \"d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337\": container with ID starting with d8fbfbc5bee12caf638134907ff65a6d6834f55dc73eb03f61291432d2e89337 not found: ID does not exist" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.980496 4861 scope.go:117] "RemoveContainer" containerID="f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754" Jan 29 06:58:43 crc kubenswrapper[4861]: E0129 06:58:43.980911 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754\": container with ID starting with f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754 not found: ID does not exist" containerID="f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.981000 4861 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754"} err="failed to get container status \"f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754\": rpc error: code = NotFound desc = could not find container \"f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754\": container with ID starting with f48692a509b5c7454f390e2acac93bc0d8d38babd31d2cef9e47cb3102890754 not found: ID does not exist" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.981027 4861 scope.go:117] "RemoveContainer" containerID="9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea" Jan 29 06:58:43 crc kubenswrapper[4861]: E0129 06:58:43.981378 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea\": container with ID starting with 9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea not found: ID does not exist" containerID="9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.981416 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea"} err="failed to get container status \"9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea\": rpc error: code = NotFound desc = could not find container \"9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea\": container with ID starting with 9d83039408e8d053abcb604f435773c23e320e085bbc5d972cecaa87501f2bea not found: ID does not exist" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.981441 4861 scope.go:117] "RemoveContainer" containerID="e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e" Jan 29 06:58:43 crc kubenswrapper[4861]: E0129 06:58:43.981739 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e\": container with ID starting with e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e not found: ID does not exist" containerID="e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.981771 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e"} err="failed to get container status \"e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e\": rpc error: code = NotFound desc = could not find container \"e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e\": container with ID starting with e3fc0c6cd07334967a1868e0ccc99f620f5c8644ea4096278fd8cde84433296e not found: ID does not exist" Jan 29 06:58:43 crc kubenswrapper[4861]: I0129 06:58:43.981790 4861 scope.go:117] "RemoveContainer" containerID="928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3" Jan 29 06:58:44 crc kubenswrapper[4861]: I0129 06:58:44.005697 4861 scope.go:117] "RemoveContainer" containerID="928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3" Jan 29 06:58:44 crc kubenswrapper[4861]: E0129 06:58:44.006241 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3\": 
container with ID starting with 928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3 not found: ID does not exist" containerID="928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3" Jan 29 06:58:44 crc kubenswrapper[4861]: I0129 06:58:44.006279 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3"} err="failed to get container status \"928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3\": rpc error: code = NotFound desc = could not find container \"928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3\": container with ID starting with 928df8c8b9f3d992377fa9f4ac60d4cd472096b8fff29a11cbbce13c148775c3 not found: ID does not exist" Jan 29 06:58:44 crc kubenswrapper[4861]: I0129 06:58:44.006306 4861 scope.go:117] "RemoveContainer" containerID="43ed60341599191e6dc9eb1bb3d03dd08460301dbced2c46cda3df1d5546241f" Jan 29 06:58:45 crc kubenswrapper[4861]: I0129 06:58:45.131731 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" path="/var/lib/kubelet/pods/333e76bd-235e-4b74-a6c9-ce702309ec38/volumes" Jan 29 06:58:45 crc kubenswrapper[4861]: I0129 06:58:45.133735 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b52afb6-32de-4f14-9663-adeec08b4fad" path="/var/lib/kubelet/pods/5b52afb6-32de-4f14-9663-adeec08b4fad/volumes" Jan 29 06:58:45 crc kubenswrapper[4861]: I0129 06:58:45.134918 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c749a121-7e8e-4d49-8a30-c27fa21926b5" path="/var/lib/kubelet/pods/c749a121-7e8e-4d49-8a30-c27fa21926b5/volumes" Jan 29 06:58:45 crc kubenswrapper[4861]: E0129 06:58:45.941701 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:45 crc kubenswrapper[4861]: E0129 06:58:45.942397 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:45 crc kubenswrapper[4861]: E0129 06:58:45.943022 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:45 crc kubenswrapper[4861]: E0129 06:58:45.943126 4861 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" 
podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server" Jan 29 06:58:45 crc kubenswrapper[4861]: E0129 06:58:45.944126 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:45 crc kubenswrapper[4861]: E0129 06:58:45.946021 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:45 crc kubenswrapper[4861]: E0129 06:58:45.947661 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:45 crc kubenswrapper[4861]: E0129 06:58:45.947692 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovs-vswitchd" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.659681 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.765611 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config\") pod \"ce4279f2-eded-42d5-9353-5235a6b7d64e\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.765681 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-ovndb-tls-certs\") pod \"ce4279f2-eded-42d5-9353-5235a6b7d64e\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.765764 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-internal-tls-certs\") pod \"ce4279f2-eded-42d5-9353-5235a6b7d64e\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.765812 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-combined-ca-bundle\") pod \"ce4279f2-eded-42d5-9353-5235a6b7d64e\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.765839 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config\") pod \"ce4279f2-eded-42d5-9353-5235a6b7d64e\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.765880 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bkr9p\" (UniqueName: \"kubernetes.io/projected/ce4279f2-eded-42d5-9353-5235a6b7d64e-kube-api-access-bkr9p\") pod \"ce4279f2-eded-42d5-9353-5235a6b7d64e\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.766001 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-public-tls-certs\") pod \"ce4279f2-eded-42d5-9353-5235a6b7d64e\" (UID: \"ce4279f2-eded-42d5-9353-5235a6b7d64e\") " Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.772629 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "ce4279f2-eded-42d5-9353-5235a6b7d64e" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.772858 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce4279f2-eded-42d5-9353-5235a6b7d64e-kube-api-access-bkr9p" (OuterVolumeSpecName: "kube-api-access-bkr9p") pod "ce4279f2-eded-42d5-9353-5235a6b7d64e" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e"). InnerVolumeSpecName "kube-api-access-bkr9p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.813113 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config" (OuterVolumeSpecName: "config") pod "ce4279f2-eded-42d5-9353-5235a6b7d64e" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.814215 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ce4279f2-eded-42d5-9353-5235a6b7d64e" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.814551 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce4279f2-eded-42d5-9353-5235a6b7d64e" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.817532 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "ce4279f2-eded-42d5-9353-5235a6b7d64e" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.827253 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ce4279f2-eded-42d5-9353-5235a6b7d64e" (UID: "ce4279f2-eded-42d5-9353-5235a6b7d64e"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.867247 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.867276 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.867286 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.867295 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bkr9p\" (UniqueName: \"kubernetes.io/projected/ce4279f2-eded-42d5-9353-5235a6b7d64e-kube-api-access-bkr9p\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.867306 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.867315 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-config\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.867323 4861 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce4279f2-eded-42d5-9353-5235a6b7d64e-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.930998 4861 generic.go:334] "Generic (PLEG): container finished" podID="ce4279f2-eded-42d5-9353-5235a6b7d64e" containerID="ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e" exitCode=0 Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.931139 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f868dbfb9-5bdsk" event={"ID":"ce4279f2-eded-42d5-9353-5235a6b7d64e","Type":"ContainerDied","Data":"ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e"} Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.931194 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7f868dbfb9-5bdsk" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.931525 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f868dbfb9-5bdsk" event={"ID":"ce4279f2-eded-42d5-9353-5235a6b7d64e","Type":"ContainerDied","Data":"78205a1a4450fceb7130e9998c3b6dbdfcca1d4c192cbe04db325a7a9943c942"} Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.931730 4861 scope.go:117] "RemoveContainer" containerID="fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.961976 4861 scope.go:117] "RemoveContainer" containerID="ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.990412 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f868dbfb9-5bdsk"] Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.997242 4861 scope.go:117] "RemoveContainer" containerID="fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.997719 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7f868dbfb9-5bdsk"] Jan 29 06:58:46 crc kubenswrapper[4861]: E0129 06:58:46.997916 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2\": container with ID starting with fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2 not found: ID does not exist" containerID="fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.997961 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2"} err="failed to get container status \"fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2\": rpc error: code = NotFound desc = could not find container \"fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2\": container with ID starting with fd3d0c8b5b542afbdb4919e4047d80d3e855011735387b7de4c97d91eebf26b2 not found: ID does not exist" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.997992 4861 scope.go:117] "RemoveContainer" containerID="ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e" Jan 29 06:58:46 crc kubenswrapper[4861]: E0129 06:58:46.998524 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e\": container with ID starting with ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e not found: ID does not exist" containerID="ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e" Jan 29 06:58:46 crc kubenswrapper[4861]: I0129 06:58:46.998563 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e"} err="failed to get container status \"ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e\": rpc error: code = NotFound desc = could not find container \"ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e\": container with ID starting with ebc41b0b4c04cc65807325f1ddd4fe3d1b0ef0774fc8120b40bb4aec51481d4e not found: ID does not exist" Jan 29 06:58:47 crc 
kubenswrapper[4861]: I0129 06:58:47.133860 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce4279f2-eded-42d5-9353-5235a6b7d64e" path="/var/lib/kubelet/pods/ce4279f2-eded-42d5-9353-5235a6b7d64e/volumes" Jan 29 06:58:50 crc kubenswrapper[4861]: E0129 06:58:50.940995 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:50 crc kubenswrapper[4861]: E0129 06:58:50.942669 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:50 crc kubenswrapper[4861]: E0129 06:58:50.942989 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:50 crc kubenswrapper[4861]: E0129 06:58:50.943016 4861 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server" Jan 29 06:58:50 crc kubenswrapper[4861]: E0129 06:58:50.943500 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:50 crc kubenswrapper[4861]: E0129 06:58:50.945131 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:50 crc kubenswrapper[4861]: E0129 06:58:50.947206 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:50 crc kubenswrapper[4861]: E0129 06:58:50.947276 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovs-vswitchd" Jan 29 06:58:55 crc kubenswrapper[4861]: E0129 06:58:55.941444 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:55 crc kubenswrapper[4861]: E0129 06:58:55.942638 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:55 crc kubenswrapper[4861]: E0129 06:58:55.943224 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:58:55 crc kubenswrapper[4861]: E0129 06:58:55.943283 4861 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server" Jan 29 06:58:55 crc kubenswrapper[4861]: E0129 06:58:55.945596 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:55 crc kubenswrapper[4861]: E0129 06:58:55.947737 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:55 crc kubenswrapper[4861]: E0129 06:58:55.949690 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:58:55 crc kubenswrapper[4861]: E0129 06:58:55.949723 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" 
podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovs-vswitchd" Jan 29 06:59:00 crc kubenswrapper[4861]: E0129 06:59:00.941941 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:59:00 crc kubenswrapper[4861]: E0129 06:59:00.943191 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:59:00 crc kubenswrapper[4861]: E0129 06:59:00.943841 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 06:59:00 crc kubenswrapper[4861]: E0129 06:59:00.943924 4861 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server" Jan 29 06:59:00 crc kubenswrapper[4861]: E0129 06:59:00.944029 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:59:00 crc kubenswrapper[4861]: E0129 06:59:00.947757 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:59:00 crc kubenswrapper[4861]: E0129 06:59:00.949385 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 06:59:00 crc kubenswrapper[4861]: E0129 06:59:00.949458 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-6n7w9" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovs-vswitchd" Jan 29 06:59:04 crc 
kubenswrapper[4861]: E0129 06:59:04.670373 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d54030c_d725_4a6c_ad29_d84482378f20.slice/crio-0f6ad2c5dcab8a4a865c78703af5ec17abaa3949c079878716530d1bf7fd0391.scope\": RecentStats: unable to find data in memory cache]"
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.134240 4861 generic.go:334] "Generic (PLEG): container finished" podID="b1c31da3-c703-4d07-82e5-b02fe841a548" containerID="fd9675094dbc5e4671db27d2d11399c30b5682eca9316dcb9802ca14217ef8f4" exitCode=137
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.140984 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b1c31da3-c703-4d07-82e5-b02fe841a548","Type":"ContainerDied","Data":"fd9675094dbc5e4671db27d2d11399c30b5682eca9316dcb9802ca14217ef8f4"}
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.141434 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b1c31da3-c703-4d07-82e5-b02fe841a548","Type":"ContainerDied","Data":"b1a329988008787d1cfa716266a509dd11d47339ff28b145e6b29bdaaf290f98"}
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.141458 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1a329988008787d1cfa716266a509dd11d47339ff28b145e6b29bdaaf290f98"
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.144281 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d54030c-d725-4a6c-ad29-d84482378f20" containerID="0f6ad2c5dcab8a4a865c78703af5ec17abaa3949c079878716530d1bf7fd0391" exitCode=137
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.144345 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"0f6ad2c5dcab8a4a865c78703af5ec17abaa3949c079878716530d1bf7fd0391"}
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.267377 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.278974 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369037 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-combined-ca-bundle\") pod \"b1c31da3-c703-4d07-82e5-b02fe841a548\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369117 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b1c31da3-c703-4d07-82e5-b02fe841a548-etc-machine-id\") pod \"b1c31da3-c703-4d07-82e5-b02fe841a548\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369146 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"7d54030c-d725-4a6c-ad29-d84482378f20\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369199 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-lock\") pod \"7d54030c-d725-4a6c-ad29-d84482378f20\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369241 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift\") pod \"7d54030c-d725-4a6c-ad29-d84482378f20\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369285 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data-custom\") pod \"b1c31da3-c703-4d07-82e5-b02fe841a548\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369335 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfwnr\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-kube-api-access-cfwnr\") pod \"7d54030c-d725-4a6c-ad29-d84482378f20\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369360 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-cache\") pod \"7d54030c-d725-4a6c-ad29-d84482378f20\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369394 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6v9x\" (UniqueName: \"kubernetes.io/projected/b1c31da3-c703-4d07-82e5-b02fe841a548-kube-api-access-c6v9x\") pod \"b1c31da3-c703-4d07-82e5-b02fe841a548\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369422 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data\") pod \"b1c31da3-c703-4d07-82e5-b02fe841a548\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369445 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d54030c-d725-4a6c-ad29-d84482378f20-combined-ca-bundle\") pod \"7d54030c-d725-4a6c-ad29-d84482378f20\" (UID: \"7d54030c-d725-4a6c-ad29-d84482378f20\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.369542 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-scripts\") pod \"b1c31da3-c703-4d07-82e5-b02fe841a548\" (UID: \"b1c31da3-c703-4d07-82e5-b02fe841a548\") "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.370719 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-lock" (OuterVolumeSpecName: "lock") pod "7d54030c-d725-4a6c-ad29-d84482378f20" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.370778 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1c31da3-c703-4d07-82e5-b02fe841a548-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b1c31da3-c703-4d07-82e5-b02fe841a548" (UID: "b1c31da3-c703-4d07-82e5-b02fe841a548"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.375022 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1c31da3-c703-4d07-82e5-b02fe841a548-kube-api-access-c6v9x" (OuterVolumeSpecName: "kube-api-access-c6v9x") pod "b1c31da3-c703-4d07-82e5-b02fe841a548" (UID: "b1c31da3-c703-4d07-82e5-b02fe841a548"). InnerVolumeSpecName "kube-api-access-c6v9x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.375461 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-scripts" (OuterVolumeSpecName: "scripts") pod "b1c31da3-c703-4d07-82e5-b02fe841a548" (UID: "b1c31da3-c703-4d07-82e5-b02fe841a548"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.375484 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-cache" (OuterVolumeSpecName: "cache") pod "7d54030c-d725-4a6c-ad29-d84482378f20" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.376265 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b1c31da3-c703-4d07-82e5-b02fe841a548" (UID: "b1c31da3-c703-4d07-82e5-b02fe841a548"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.376947 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-kube-api-access-cfwnr" (OuterVolumeSpecName: "kube-api-access-cfwnr") pod "7d54030c-d725-4a6c-ad29-d84482378f20" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20"). InnerVolumeSpecName "kube-api-access-cfwnr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.378114 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "7d54030c-d725-4a6c-ad29-d84482378f20" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.379198 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "swift") pod "7d54030c-d725-4a6c-ad29-d84482378f20" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.423064 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1c31da3-c703-4d07-82e5-b02fe841a548" (UID: "b1c31da3-c703-4d07-82e5-b02fe841a548"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.456821 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data" (OuterVolumeSpecName: "config-data") pod "b1c31da3-c703-4d07-82e5-b02fe841a548" (UID: "b1c31da3-c703-4d07-82e5-b02fe841a548"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.471414 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.471551 4861 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b1c31da3-c703-4d07-82e5-b02fe841a548-etc-machine-id\") on node \"crc\" DevicePath \"\""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.471650 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" "
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.471742 4861 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-lock\") on node \"crc\" DevicePath \"\""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.471835 4861 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.471914 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.471993 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfwnr\" (UniqueName: \"kubernetes.io/projected/7d54030c-d725-4a6c-ad29-d84482378f20-kube-api-access-cfwnr\") on node \"crc\" DevicePath \"\""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.472066 4861 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7d54030c-d725-4a6c-ad29-d84482378f20-cache\") on node \"crc\" DevicePath \"\""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.472160 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6v9x\" (UniqueName: \"kubernetes.io/projected/b1c31da3-c703-4d07-82e5-b02fe841a548-kube-api-access-c6v9x\") on node \"crc\" DevicePath \"\""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.472230 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.472311 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1c31da3-c703-4d07-82e5-b02fe841a548-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.490304 4861 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc"
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.571908 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-6n7w9_633f63c1-539f-4477-8aae-d6731a514280/ovs-vswitchd/0.log"
Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.572858 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-6n7w9"
Need to start a new one" pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.573682 4861 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.674241 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/633f63c1-539f-4477-8aae-d6731a514280-scripts\") pod \"633f63c1-539f-4477-8aae-d6731a514280\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.674339 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-run\") pod \"633f63c1-539f-4477-8aae-d6731a514280\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.674380 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-lib\") pod \"633f63c1-539f-4477-8aae-d6731a514280\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.674439 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-etc-ovs\") pod \"633f63c1-539f-4477-8aae-d6731a514280\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.674514 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8nbk\" (UniqueName: \"kubernetes.io/projected/633f63c1-539f-4477-8aae-d6731a514280-kube-api-access-l8nbk\") pod \"633f63c1-539f-4477-8aae-d6731a514280\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.674540 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-log\") pod \"633f63c1-539f-4477-8aae-d6731a514280\" (UID: \"633f63c1-539f-4477-8aae-d6731a514280\") " Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.674688 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-lib" (OuterVolumeSpecName: "var-lib") pod "633f63c1-539f-4477-8aae-d6731a514280" (UID: "633f63c1-539f-4477-8aae-d6731a514280"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.674755 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-run" (OuterVolumeSpecName: "var-run") pod "633f63c1-539f-4477-8aae-d6731a514280" (UID: "633f63c1-539f-4477-8aae-d6731a514280"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.674838 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "633f63c1-539f-4477-8aae-d6731a514280" (UID: "633f63c1-539f-4477-8aae-d6731a514280"). 
InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.675646 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-log" (OuterVolumeSpecName: "var-log") pod "633f63c1-539f-4477-8aae-d6731a514280" (UID: "633f63c1-539f-4477-8aae-d6731a514280"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.675939 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/633f63c1-539f-4477-8aae-d6731a514280-scripts" (OuterVolumeSpecName: "scripts") pod "633f63c1-539f-4477-8aae-d6731a514280" (UID: "633f63c1-539f-4477-8aae-d6731a514280"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.676272 4861 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-lib\") on node \"crc\" DevicePath \"\"" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.676297 4861 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-etc-ovs\") on node \"crc\" DevicePath \"\"" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.676309 4861 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-log\") on node \"crc\" DevicePath \"\"" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.676319 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/633f63c1-539f-4477-8aae-d6731a514280-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.676328 4861 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/633f63c1-539f-4477-8aae-d6731a514280-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.678211 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d54030c-d725-4a6c-ad29-d84482378f20-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7d54030c-d725-4a6c-ad29-d84482378f20" (UID: "7d54030c-d725-4a6c-ad29-d84482378f20"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.678254 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/633f63c1-539f-4477-8aae-d6731a514280-kube-api-access-l8nbk" (OuterVolumeSpecName: "kube-api-access-l8nbk") pod "633f63c1-539f-4477-8aae-d6731a514280" (UID: "633f63c1-539f-4477-8aae-d6731a514280"). InnerVolumeSpecName "kube-api-access-l8nbk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.777152 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d54030c-d725-4a6c-ad29-d84482378f20-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 06:59:05 crc kubenswrapper[4861]: I0129 06:59:05.777187 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8nbk\" (UniqueName: \"kubernetes.io/projected/633f63c1-539f-4477-8aae-d6731a514280-kube-api-access-l8nbk\") on node \"crc\" DevicePath \"\"" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.160150 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-6n7w9_633f63c1-539f-4477-8aae-d6731a514280/ovs-vswitchd/0.log" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.161649 4861 generic.go:334] "Generic (PLEG): container finished" podID="633f63c1-539f-4477-8aae-d6731a514280" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" exitCode=137 Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.161719 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6n7w9" event={"ID":"633f63c1-539f-4477-8aae-d6731a514280","Type":"ContainerDied","Data":"f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f"} Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.161796 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-6n7w9" event={"ID":"633f63c1-539f-4477-8aae-d6731a514280","Type":"ContainerDied","Data":"9c31799a9d35eeb6391fb1cb5480751f486272dd441b9fe4dc9735c2dd41dea0"} Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.161828 4861 scope.go:117] "RemoveContainer" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.162245 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-6n7w9" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.174151 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.174270 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.174147 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7d54030c-d725-4a6c-ad29-d84482378f20","Type":"ContainerDied","Data":"62e165a6b3e7935aaad8da87fafaf356e2b0cb08e9376558b4168b295cdf13c7"} Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.224698 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.225532 4861 scope.go:117] "RemoveContainer" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.232177 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.244595 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.253355 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.273243 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-6n7w9"] Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.278387 4861 scope.go:117] "RemoveContainer" containerID="7b3d7a1bfbd9f7051aecfdfc98f93e13778cf0788546fce2d6915d3bca1db38f" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.279087 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-6n7w9"] Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.312635 4861 scope.go:117] "RemoveContainer" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" Jan 29 06:59:06 crc kubenswrapper[4861]: E0129 06:59:06.313244 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f\": container with ID starting with f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f not found: ID does not exist" containerID="f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.313280 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f"} err="failed to get container status \"f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f\": rpc error: code = NotFound desc = could not find container \"f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f\": container with ID starting with f01091bd44e299c3b4a9d4c9499c9aafcc593f01a6eb2bcfcc41c4fb48ebee0f not found: ID does not exist" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.313304 4861 scope.go:117] "RemoveContainer" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" Jan 29 06:59:06 crc kubenswrapper[4861]: E0129 06:59:06.313719 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52\": container with ID starting with df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 not found: ID does not exist" containerID="df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52" Jan 29 06:59:06 crc 
kubenswrapper[4861]: I0129 06:59:06.313786 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52"} err="failed to get container status \"df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52\": rpc error: code = NotFound desc = could not find container \"df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52\": container with ID starting with df76894860a34b8cad898c8a6b31439654bc3a71bf6740a099e2ea031ece4c52 not found: ID does not exist" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.313826 4861 scope.go:117] "RemoveContainer" containerID="7b3d7a1bfbd9f7051aecfdfc98f93e13778cf0788546fce2d6915d3bca1db38f" Jan 29 06:59:06 crc kubenswrapper[4861]: E0129 06:59:06.314269 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b3d7a1bfbd9f7051aecfdfc98f93e13778cf0788546fce2d6915d3bca1db38f\": container with ID starting with 7b3d7a1bfbd9f7051aecfdfc98f93e13778cf0788546fce2d6915d3bca1db38f not found: ID does not exist" containerID="7b3d7a1bfbd9f7051aecfdfc98f93e13778cf0788546fce2d6915d3bca1db38f" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.314310 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b3d7a1bfbd9f7051aecfdfc98f93e13778cf0788546fce2d6915d3bca1db38f"} err="failed to get container status \"7b3d7a1bfbd9f7051aecfdfc98f93e13778cf0788546fce2d6915d3bca1db38f\": rpc error: code = NotFound desc = could not find container \"7b3d7a1bfbd9f7051aecfdfc98f93e13778cf0788546fce2d6915d3bca1db38f\": container with ID starting with 7b3d7a1bfbd9f7051aecfdfc98f93e13778cf0788546fce2d6915d3bca1db38f not found: ID does not exist" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.314335 4861 scope.go:117] "RemoveContainer" containerID="0f6ad2c5dcab8a4a865c78703af5ec17abaa3949c079878716530d1bf7fd0391" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.336303 4861 scope.go:117] "RemoveContainer" containerID="849976197be27f3f0414f54d8c975813716a50cdd59b37975b4eb4bb0b453c69" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.361842 4861 scope.go:117] "RemoveContainer" containerID="80b76bbf5574a1ffe9a28896fadf09a48689fb5bb78991c8c124528c6850d0ee" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.389891 4861 scope.go:117] "RemoveContainer" containerID="300cdcb844a68c46fd719e6be6e862e7b417f885d1ab7289bf038801298b0951" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.414619 4861 scope.go:117] "RemoveContainer" containerID="e2a3b495086295e31b7ed56c3d2932e3f985fccd26d8e9e239e77653b59a0d32" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.441080 4861 scope.go:117] "RemoveContainer" containerID="7fed2197542cb4f3117973c4387005866a5b3aa792d7b6f414b399fca8226503" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.467019 4861 scope.go:117] "RemoveContainer" containerID="696299cd0fa4bb5069c2910a6be63baa743730b8326a70bb3ffd8aa9d1c825ec" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.484775 4861 scope.go:117] "RemoveContainer" containerID="3281266ddbd401b2f04a7cb7e231cd35c5bced4b7f65472c79c6cab82698c818" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.507335 4861 scope.go:117] "RemoveContainer" containerID="18d6ec1b3d371c36c925fb4104455a8183e0a1995e0abd435a9954ffab121835" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.524399 4861 scope.go:117] "RemoveContainer" 
containerID="42fe23b69a4684b68ede63233c8ea85578f5383ad1505896e099548f6e44a6ea" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.556596 4861 scope.go:117] "RemoveContainer" containerID="5f0359b2c69c9a01c0a74bbb8ecc34b7cb21acbd0a142f267a70aaf243d0d4d1" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.585377 4861 scope.go:117] "RemoveContainer" containerID="f247857b6eb8ade45650fa7fc5c2b6bff1ac506097b24f9f3cdf86be8a43d2d4" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.619378 4861 scope.go:117] "RemoveContainer" containerID="c2889e92275d93552a69c4569021d1f48b14b5ad80332e996fa65c8fc322719d" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.650402 4861 scope.go:117] "RemoveContainer" containerID="90931a6cfdb8a44357367186d2c4396fd4c9ac22d948ca358a02706b89784468" Jan 29 06:59:06 crc kubenswrapper[4861]: I0129 06:59:06.684140 4861 scope.go:117] "RemoveContainer" containerID="27fc60fdd9d503cf21c40b0704ab3f668d5965d81d74cbbc4c3aa6e2ce528d23" Jan 29 06:59:07 crc kubenswrapper[4861]: I0129 06:59:07.123915 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="633f63c1-539f-4477-8aae-d6731a514280" path="/var/lib/kubelet/pods/633f63c1-539f-4477-8aae-d6731a514280/volumes" Jan 29 06:59:07 crc kubenswrapper[4861]: I0129 06:59:07.124631 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" path="/var/lib/kubelet/pods/7d54030c-d725-4a6c-ad29-d84482378f20/volumes" Jan 29 06:59:07 crc kubenswrapper[4861]: I0129 06:59:07.125932 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1c31da3-c703-4d07-82e5-b02fe841a548" path="/var/lib/kubelet/pods/b1c31da3-c703-4d07-82e5-b02fe841a548/volumes" Jan 29 06:59:30 crc kubenswrapper[4861]: I0129 06:59:30.629681 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 06:59:30 crc kubenswrapper[4861]: I0129 06:59:30.630230 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.824789 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mt4xb"] Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.831175 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-reaper" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.831297 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-reaper" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.831353 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-server" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.831405 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-server" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.831465 4861 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="swift-recon-cron" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.831515 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="swift-recon-cron" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.831569 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27188e95-6192-4569-b254-c1e2d9b28086" containerName="keystone-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.831621 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="27188e95-6192-4569-b254-c1e2d9b28086" containerName="keystone-api" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.831674 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c370e6a-40e9-4055-857e-c8357c904c8e" containerName="mysql-bootstrap" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.831724 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c370e6a-40e9-4055-857e-c8357c904c8e" containerName="mysql-bootstrap" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.831798 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-metadata" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.831860 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-metadata" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.831913 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1c31da3-c703-4d07-82e5-b02fe841a548" containerName="cinder-scheduler" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.831967 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1c31da3-c703-4d07-82e5-b02fe841a548" containerName="cinder-scheduler" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832035 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-updater" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832101 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-updater" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832159 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerName="nova-api-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832216 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerName="nova-api-api" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832250 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34dfd085-c2bc-4fa4-a950-7df85c48fec0" containerName="memcached" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832257 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="34dfd085-c2bc-4fa4-a950-7df85c48fec0" containerName="memcached" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832267 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10b22efc-707a-4ffc-8edc-44c39900ba2b" containerName="glance-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832273 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="10b22efc-707a-4ffc-8edc-44c39900ba2b" containerName="glance-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832284 4861 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832292 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832300 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-updater" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832307 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-updater" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832313 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-replicator" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832318 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-replicator" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832325 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ce76094-c71f-46c7-a69d-7d30d8540c5a" containerName="glance-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832331 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ce76094-c71f-46c7-a69d-7d30d8540c5a" containerName="glance-log" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832338 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ce76094-c71f-46c7-a69d-7d30d8540c5a" containerName="glance-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832344 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ce76094-c71f-46c7-a69d-7d30d8540c5a" containerName="glance-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832352 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="sg-core" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832358 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="sg-core" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832367 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93a9df75-0ea9-457b-84f0-17b95d5dcced" containerName="ovn-northd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832372 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="93a9df75-0ea9-457b-84f0-17b95d5dcced" containerName="ovn-northd" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832380 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b52afb6-32de-4f14-9663-adeec08b4fad" containerName="ovn-controller" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832386 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b52afb6-32de-4f14-9663-adeec08b4fad" containerName="ovn-controller" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832395 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="947c222c-8f0c-423f-84e8-75a4b9322829" containerName="barbican-api-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832401 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="947c222c-8f0c-423f-84e8-75a4b9322829" containerName="barbican-api-log" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832411 4861 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="3b8b1385-123a-4b60-af39-82d6492a65c2" containerName="rabbitmq" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832419 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b8b1385-123a-4b60-af39-82d6492a65c2" containerName="rabbitmq" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832428 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-server" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832434 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-server" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832442 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="953c17ea-50f0-4111-8bc1-16819c1bce47" containerName="kube-state-metrics" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832448 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="953c17ea-50f0-4111-8bc1-16819c1bce47" containerName="kube-state-metrics" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832457 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="rsync" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832463 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="rsync" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832469 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10b22efc-707a-4ffc-8edc-44c39900ba2b" containerName="glance-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832475 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="10b22efc-707a-4ffc-8edc-44c39900ba2b" containerName="glance-log" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832480 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-replicator" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832486 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-replicator" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832495 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerName="registry-server" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832500 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerName="registry-server" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832506 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce4279f2-eded-42d5-9353-5235a6b7d64e" containerName="neutron-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832512 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce4279f2-eded-42d5-9353-5235a6b7d64e" containerName="neutron-api" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832519 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="proxy-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832524 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="proxy-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832535 4861 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerName="nova-api-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832560 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerName="nova-api-log" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832571 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="947c222c-8f0c-423f-84e8-75a4b9322829" containerName="barbican-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832576 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="947c222c-8f0c-423f-84e8-75a4b9322829" containerName="barbican-api" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832584 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d72b59e5-64c2-4eab-955e-89d6298e834e" containerName="nova-cell0-conductor-conductor" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832589 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d72b59e5-64c2-4eab-955e-89d6298e834e" containerName="nova-cell0-conductor-conductor" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832596 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-auditor" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832601 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-auditor" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832611 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce4279f2-eded-42d5-9353-5235a6b7d64e" containerName="neutron-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832616 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce4279f2-eded-42d5-9353-5235a6b7d64e" containerName="neutron-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832626 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c25bacb-4105-4fa4-a798-117f9cbe75fe" containerName="cinder-api-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832632 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c25bacb-4105-4fa4-a798-117f9cbe75fe" containerName="cinder-api-log" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832643 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b488de3-67a5-49cf-a61a-37a44acbbe19" containerName="placement-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832648 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b488de3-67a5-49cf-a61a-37a44acbbe19" containerName="placement-api" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832655 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93a9df75-0ea9-457b-84f0-17b95d5dcced" containerName="openstack-network-exporter" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832661 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="93a9df75-0ea9-457b-84f0-17b95d5dcced" containerName="openstack-network-exporter" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832670 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" containerName="barbican-worker-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832676 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" containerName="barbican-worker-log" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832686 
4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="ceilometer-notification-agent" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832692 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="ceilometer-notification-agent" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832701 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c25bacb-4105-4fa4-a798-117f9cbe75fe" containerName="cinder-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832707 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c25bacb-4105-4fa4-a798-117f9cbe75fe" containerName="cinder-api" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832714 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovs-vswitchd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832720 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovs-vswitchd" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832727 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-replicator" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832732 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-replicator" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832738 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd9ed061-0329-42e0-8cca-e7b560c7a19c" containerName="barbican-keystone-listener" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832744 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd9ed061-0329-42e0-8cca-e7b560c7a19c" containerName="barbican-keystone-listener" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832754 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-server" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832760 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-server" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832767 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="ceilometer-central-agent" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832772 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="ceilometer-central-agent" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832780 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd9ed061-0329-42e0-8cca-e7b560c7a19c" containerName="barbican-keystone-listener-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832786 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd9ed061-0329-42e0-8cca-e7b560c7a19c" containerName="barbican-keystone-listener-log" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832795 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerName="extract-utilities" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832800 4861 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerName="extract-utilities" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832808 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server-init" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832814 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server-init" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832820 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5966cedc-8ab5-4390-906b-c5ac39333e09" containerName="rabbitmq" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832825 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5966cedc-8ab5-4390-906b-c5ac39333e09" containerName="rabbitmq" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832832 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-expirer" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832837 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-expirer" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832843 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832848 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-log" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832856 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b8b1385-123a-4b60-af39-82d6492a65c2" containerName="setup-container" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832861 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b8b1385-123a-4b60-af39-82d6492a65c2" containerName="setup-container" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832871 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5966cedc-8ab5-4390-906b-c5ac39333e09" containerName="setup-container" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832876 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5966cedc-8ab5-4390-906b-c5ac39333e09" containerName="setup-container" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832884 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b488de3-67a5-49cf-a61a-37a44acbbe19" containerName="placement-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832889 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b488de3-67a5-49cf-a61a-37a44acbbe19" containerName="placement-log" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832896 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" containerName="barbican-worker" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832902 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" containerName="barbican-worker" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832911 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerName="extract-content" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832929 4861 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerName="extract-content" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832938 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-auditor" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832943 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-auditor" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832950 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c370e6a-40e9-4055-857e-c8357c904c8e" containerName="galera" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832956 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c370e6a-40e9-4055-857e-c8357c904c8e" containerName="galera" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832964 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1c31da3-c703-4d07-82e5-b02fe841a548" containerName="probe" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832970 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1c31da3-c703-4d07-82e5-b02fe841a548" containerName="probe" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832977 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c749a121-7e8e-4d49-8a30-c27fa21926b5" containerName="nova-cell1-conductor-conductor" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832983 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c749a121-7e8e-4d49-8a30-c27fa21926b5" containerName="nova-cell1-conductor-conductor" Jan 29 06:59:52 crc kubenswrapper[4861]: E0129 06:59:52.832989 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-auditor" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.832995 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-auditor" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833124 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1c31da3-c703-4d07-82e5-b02fe841a548" containerName="probe" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833136 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b488de3-67a5-49cf-a61a-37a44acbbe19" containerName="placement-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833143 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5966cedc-8ab5-4390-906b-c5ac39333e09" containerName="rabbitmq" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833152 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="sg-core" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833160 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-expirer" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833169 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-updater" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833176 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="93a9df75-0ea9-457b-84f0-17b95d5dcced" containerName="openstack-network-exporter" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833181 4861 
memory_manager.go:354] "RemoveStaleState removing state" podUID="bd9ed061-0329-42e0-8cca-e7b560c7a19c" containerName="barbican-keystone-listener" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833188 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c749a121-7e8e-4d49-8a30-c27fa21926b5" containerName="nova-cell1-conductor-conductor" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833196 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-replicator" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833204 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="27188e95-6192-4569-b254-c1e2d9b28086" containerName="keystone-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833210 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce4279f2-eded-42d5-9353-5235a6b7d64e" containerName="neutron-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833218 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="947c222c-8f0c-423f-84e8-75a4b9322829" containerName="barbican-api-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833224 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovsdb-server" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833233 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="10b22efc-707a-4ffc-8edc-44c39900ba2b" containerName="glance-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833243 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="rsync" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833251 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d72b59e5-64c2-4eab-955e-89d6298e834e" containerName="nova-cell0-conductor-conductor" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833257 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b8b1385-123a-4b60-af39-82d6492a65c2" containerName="rabbitmq" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833267 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="swift-recon-cron" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833274 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7dfeca3-8fa1-4323-aab9-13f91619ec59" containerName="registry-server" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833283 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-updater" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833289 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b488de3-67a5-49cf-a61a-37a44acbbe19" containerName="placement-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833297 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b52afb6-32de-4f14-9663-adeec08b4fad" containerName="ovn-controller" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833304 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-auditor" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833312 4861 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="953c17ea-50f0-4111-8bc1-16819c1bce47" containerName="kube-state-metrics" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833317 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c25bacb-4105-4fa4-a798-117f9cbe75fe" containerName="cinder-api-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833323 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-server" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833331 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="93a9df75-0ea9-457b-84f0-17b95d5dcced" containerName="ovn-northd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833336 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="proxy-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833346 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-replicator" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833354 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833364 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1c31da3-c703-4d07-82e5-b02fe841a548" containerName="cinder-scheduler" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833370 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="633f63c1-539f-4477-8aae-d6731a514280" containerName="ovs-vswitchd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833378 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" containerName="barbican-worker-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833384 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c25bacb-4105-4fa4-a798-117f9cbe75fe" containerName="cinder-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833391 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerName="nova-api-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833398 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-auditor" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833404 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="object-server" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833412 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0f81cf5-e2b0-43da-af6e-de9c1d3c8aed" containerName="nova-metadata-metadata" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833421 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-auditor" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833428 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="ceilometer-central-agent" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833434 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="container-server" Jan 29 06:59:52 
crc kubenswrapper[4861]: I0129 06:59:52.833443 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="947c222c-8f0c-423f-84e8-75a4b9322829" containerName="barbican-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833451 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ce76094-c71f-46c7-a69d-7d30d8540c5a" containerName="glance-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833462 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce4279f2-eded-42d5-9353-5235a6b7d64e" containerName="neutron-api" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833468 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-replicator" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833475 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="10b22efc-707a-4ffc-8edc-44c39900ba2b" containerName="glance-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833482 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e78be7f2-60d4-4f0e-a510-bf5e652110d1" containerName="nova-api-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833488 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="34dfd085-c2bc-4fa4-a950-7df85c48fec0" containerName="memcached" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833494 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="09dd2891-14bd-4b67-a7d8-26d74fcaa6a3" containerName="barbican-worker" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833502 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd9ed061-0329-42e0-8cca-e7b560c7a19c" containerName="barbican-keystone-listener-log" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833509 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ce76094-c71f-46c7-a69d-7d30d8540c5a" containerName="glance-httpd" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833517 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d54030c-d725-4a6c-ad29-d84482378f20" containerName="account-reaper" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833525 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="333e76bd-235e-4b74-a6c9-ce702309ec38" containerName="ceilometer-notification-agent" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.833532 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c370e6a-40e9-4055-857e-c8357c904c8e" containerName="galera" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.834578 4861 util.go:30] "No sandbox for pod can be found. 
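
The memory_manager.go:354 entries above show the kubelet's memory manager pruning saved per-container allocation state for pods that no longer exist on the node, logging one line per podUID/containerName pair it drops. A minimal sketch of that pruning pattern, assuming a bare map as the state store (names and types here are illustrative, not the kubelet's actual implementation):

package main

import "fmt"

// containerKey identifies one container's saved allocation state.
// Hypothetical type for illustration; the real memory manager keeps a
// checkpointed state object, not a bare map.
type containerKey struct{ podUID, containerName string }

// removeStaleState drops saved state for containers whose pod is no
// longer active, logging one line per removal as in the log above.
func removeStaleState(state map[containerKey]bool, activePods map[string]bool) {
	for k := range state {
		if !activePods[k.podUID] {
			fmt.Printf("RemoveStaleState removing state podUID=%q containerName=%q\n",
				k.podUID, k.containerName)
			delete(state, k)
		}
	}
}

func main() {
	state := map[containerKey]bool{
		{"6c25bacb-4105-4fa4-a798-117f9cbe75fe", "cinder-api"}: true,
	}
	removeStaleState(state, map[string]bool{}) // no active pods: everything is stale
}

Logging each removal individually is what produces the long run of near-identical lines above.
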
Need to start a new one" pod="openshift-marketplace/community-operators-mt4xb" Jan 29 06:59:52 crc kubenswrapper[4861]: I0129 06:59:52.843316 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mt4xb"] Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.024514 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-utilities\") pod \"community-operators-mt4xb\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " pod="openshift-marketplace/community-operators-mt4xb" Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.024753 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-catalog-content\") pod \"community-operators-mt4xb\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " pod="openshift-marketplace/community-operators-mt4xb" Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.024840 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7ccb\" (UniqueName: \"kubernetes.io/projected/c57f3f4f-cad1-459a-87da-307f309751d9-kube-api-access-c7ccb\") pod \"community-operators-mt4xb\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " pod="openshift-marketplace/community-operators-mt4xb" Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.125793 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-catalog-content\") pod \"community-operators-mt4xb\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " pod="openshift-marketplace/community-operators-mt4xb" Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.125853 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7ccb\" (UniqueName: \"kubernetes.io/projected/c57f3f4f-cad1-459a-87da-307f309751d9-kube-api-access-c7ccb\") pod \"community-operators-mt4xb\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " pod="openshift-marketplace/community-operators-mt4xb" Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.125927 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-utilities\") pod \"community-operators-mt4xb\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " pod="openshift-marketplace/community-operators-mt4xb" Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.126399 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-utilities\") pod \"community-operators-mt4xb\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " pod="openshift-marketplace/community-operators-mt4xb" Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.126754 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-catalog-content\") pod \"community-operators-mt4xb\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " pod="openshift-marketplace/community-operators-mt4xb" Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.165036 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-c7ccb\" (UniqueName: \"kubernetes.io/projected/c57f3f4f-cad1-459a-87da-307f309751d9-kube-api-access-c7ccb\") pod \"community-operators-mt4xb\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " pod="openshift-marketplace/community-operators-mt4xb" Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.174904 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mt4xb" Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.506313 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mt4xb"] Jan 29 06:59:53 crc kubenswrapper[4861]: I0129 06:59:53.668247 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mt4xb" event={"ID":"c57f3f4f-cad1-459a-87da-307f309751d9","Type":"ContainerStarted","Data":"820100471d6d3f647744ed14b95f86709a02e62a0f1b3ae2df32df8c24e43d5b"} Jan 29 06:59:54 crc kubenswrapper[4861]: I0129 06:59:54.679858 4861 generic.go:334] "Generic (PLEG): container finished" podID="c57f3f4f-cad1-459a-87da-307f309751d9" containerID="fa83f83b0ef575123b95f3fe72ea01be7966ab4f946c5dc358bf59679bae5c81" exitCode=0 Jan 29 06:59:54 crc kubenswrapper[4861]: I0129 06:59:54.679904 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mt4xb" event={"ID":"c57f3f4f-cad1-459a-87da-307f309751d9","Type":"ContainerDied","Data":"fa83f83b0ef575123b95f3fe72ea01be7966ab4f946c5dc358bf59679bae5c81"} Jan 29 06:59:55 crc kubenswrapper[4861]: I0129 06:59:55.688183 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mt4xb" event={"ID":"c57f3f4f-cad1-459a-87da-307f309751d9","Type":"ContainerStarted","Data":"ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919"} Jan 29 06:59:56 crc kubenswrapper[4861]: I0129 06:59:56.701269 4861 generic.go:334] "Generic (PLEG): container finished" podID="c57f3f4f-cad1-459a-87da-307f309751d9" containerID="ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919" exitCode=0 Jan 29 06:59:56 crc kubenswrapper[4861]: I0129 06:59:56.701353 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mt4xb" event={"ID":"c57f3f4f-cad1-459a-87da-307f309751d9","Type":"ContainerDied","Data":"ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919"} Jan 29 06:59:57 crc kubenswrapper[4861]: I0129 06:59:57.710552 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mt4xb" event={"ID":"c57f3f4f-cad1-459a-87da-307f309751d9","Type":"ContainerStarted","Data":"55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e"} Jan 29 06:59:57 crc kubenswrapper[4861]: I0129 06:59:57.737339 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mt4xb" podStartSLOduration=3.310898035 podStartE2EDuration="5.737317615s" podCreationTimestamp="2026-01-29 06:59:52 +0000 UTC" firstStartedPulling="2026-01-29 06:59:54.681831072 +0000 UTC m=+1486.353325629" lastFinishedPulling="2026-01-29 06:59:57.108250652 +0000 UTC m=+1488.779745209" observedRunningTime="2026-01-29 06:59:57.730189345 +0000 UTC m=+1489.401683952" watchObservedRunningTime="2026-01-29 06:59:57.737317615 +0000 UTC m=+1489.408812172" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.146830 4861 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9"] Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.148262 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.149890 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.150259 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.175185 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9"] Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.208393 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wb48n"] Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.210341 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.216132 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wb48n"] Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.244460 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jprk\" (UniqueName: \"kubernetes.io/projected/b862adb6-7c39-4a96-bddb-b35306031ebc-kube-api-access-2jprk\") pod \"collect-profiles-29494500-qjjd9\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.244556 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b862adb6-7c39-4a96-bddb-b35306031ebc-config-volume\") pod \"collect-profiles-29494500-qjjd9\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.244597 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b862adb6-7c39-4a96-bddb-b35306031ebc-secret-volume\") pod \"collect-profiles-29494500-qjjd9\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.244654 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shkm8\" (UniqueName: \"kubernetes.io/projected/f4e98fb1-8453-4881-afea-142a9bcb40ac-kube-api-access-shkm8\") pod \"redhat-marketplace-wb48n\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.244710 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-utilities\") pod \"redhat-marketplace-wb48n\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " 
pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.244818 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-catalog-content\") pod \"redhat-marketplace-wb48n\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.345765 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b862adb6-7c39-4a96-bddb-b35306031ebc-secret-volume\") pod \"collect-profiles-29494500-qjjd9\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.345826 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shkm8\" (UniqueName: \"kubernetes.io/projected/f4e98fb1-8453-4881-afea-142a9bcb40ac-kube-api-access-shkm8\") pod \"redhat-marketplace-wb48n\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.345864 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-utilities\") pod \"redhat-marketplace-wb48n\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.345915 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-catalog-content\") pod \"redhat-marketplace-wb48n\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.345979 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jprk\" (UniqueName: \"kubernetes.io/projected/b862adb6-7c39-4a96-bddb-b35306031ebc-kube-api-access-2jprk\") pod \"collect-profiles-29494500-qjjd9\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.346014 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b862adb6-7c39-4a96-bddb-b35306031ebc-config-volume\") pod \"collect-profiles-29494500-qjjd9\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.346794 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-utilities\") pod \"redhat-marketplace-wb48n\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.347000 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b862adb6-7c39-4a96-bddb-b35306031ebc-config-volume\") pod 
\"collect-profiles-29494500-qjjd9\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.347175 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-catalog-content\") pod \"redhat-marketplace-wb48n\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.350974 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b862adb6-7c39-4a96-bddb-b35306031ebc-secret-volume\") pod \"collect-profiles-29494500-qjjd9\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.364607 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shkm8\" (UniqueName: \"kubernetes.io/projected/f4e98fb1-8453-4881-afea-142a9bcb40ac-kube-api-access-shkm8\") pod \"redhat-marketplace-wb48n\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.379155 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jprk\" (UniqueName: \"kubernetes.io/projected/b862adb6-7c39-4a96-bddb-b35306031ebc-kube-api-access-2jprk\") pod \"collect-profiles-29494500-qjjd9\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.479941 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.525725 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.629835 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.630229 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:00:00 crc kubenswrapper[4861]: I0129 07:00:00.729860 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9"] Jan 29 07:00:00 crc kubenswrapper[4861]: W0129 07:00:00.733774 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb862adb6_7c39_4a96_bddb_b35306031ebc.slice/crio-dc85847ea8f6da3949792d573424fa204e967e2a2f013651f01a9cb643690a2b WatchSource:0}: Error finding container dc85847ea8f6da3949792d573424fa204e967e2a2f013651f01a9cb643690a2b: Status 404 returned error can't find the container with id dc85847ea8f6da3949792d573424fa204e967e2a2f013651f01a9cb643690a2b Jan 29 07:00:01 crc kubenswrapper[4861]: I0129 07:00:01.033769 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wb48n"] Jan 29 07:00:01 crc kubenswrapper[4861]: W0129 07:00:01.039641 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4e98fb1_8453_4881_afea_142a9bcb40ac.slice/crio-1b4a578238c555e00f49a4442caeeebe2c57c35ee512b3aa2d0fa67626087b99 WatchSource:0}: Error finding container 1b4a578238c555e00f49a4442caeeebe2c57c35ee512b3aa2d0fa67626087b99: Status 404 returned error can't find the container with id 1b4a578238c555e00f49a4442caeeebe2c57c35ee512b3aa2d0fa67626087b99 Jan 29 07:00:01 crc kubenswrapper[4861]: I0129 07:00:01.746228 4861 generic.go:334] "Generic (PLEG): container finished" podID="b862adb6-7c39-4a96-bddb-b35306031ebc" containerID="8bb2554be01b8fb2aca36b2ce880eea85e470f8fef763c5b4bda80f3548020a2" exitCode=0 Jan 29 07:00:01 crc kubenswrapper[4861]: I0129 07:00:01.746279 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" event={"ID":"b862adb6-7c39-4a96-bddb-b35306031ebc","Type":"ContainerDied","Data":"8bb2554be01b8fb2aca36b2ce880eea85e470f8fef763c5b4bda80f3548020a2"} Jan 29 07:00:01 crc kubenswrapper[4861]: I0129 07:00:01.746538 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" event={"ID":"b862adb6-7c39-4a96-bddb-b35306031ebc","Type":"ContainerStarted","Data":"dc85847ea8f6da3949792d573424fa204e967e2a2f013651f01a9cb643690a2b"} Jan 29 07:00:01 crc kubenswrapper[4861]: I0129 07:00:01.748248 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4e98fb1-8453-4881-afea-142a9bcb40ac" containerID="752316fad903bf6bef800aa7ff1f2771d1d5612299d88d97481e9c8232320cd5" exitCode=0 Jan 29 07:00:01 crc kubenswrapper[4861]: I0129 07:00:01.748297 4861 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb48n" event={"ID":"f4e98fb1-8453-4881-afea-142a9bcb40ac","Type":"ContainerDied","Data":"752316fad903bf6bef800aa7ff1f2771d1d5612299d88d97481e9c8232320cd5"} Jan 29 07:00:01 crc kubenswrapper[4861]: I0129 07:00:01.748321 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb48n" event={"ID":"f4e98fb1-8453-4881-afea-142a9bcb40ac","Type":"ContainerStarted","Data":"1b4a578238c555e00f49a4442caeeebe2c57c35ee512b3aa2d0fa67626087b99"} Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.059592 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.093705 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jprk\" (UniqueName: \"kubernetes.io/projected/b862adb6-7c39-4a96-bddb-b35306031ebc-kube-api-access-2jprk\") pod \"b862adb6-7c39-4a96-bddb-b35306031ebc\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.093771 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b862adb6-7c39-4a96-bddb-b35306031ebc-secret-volume\") pod \"b862adb6-7c39-4a96-bddb-b35306031ebc\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.093813 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b862adb6-7c39-4a96-bddb-b35306031ebc-config-volume\") pod \"b862adb6-7c39-4a96-bddb-b35306031ebc\" (UID: \"b862adb6-7c39-4a96-bddb-b35306031ebc\") " Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.094849 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b862adb6-7c39-4a96-bddb-b35306031ebc-config-volume" (OuterVolumeSpecName: "config-volume") pod "b862adb6-7c39-4a96-bddb-b35306031ebc" (UID: "b862adb6-7c39-4a96-bddb-b35306031ebc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.100696 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b862adb6-7c39-4a96-bddb-b35306031ebc-kube-api-access-2jprk" (OuterVolumeSpecName: "kube-api-access-2jprk") pod "b862adb6-7c39-4a96-bddb-b35306031ebc" (UID: "b862adb6-7c39-4a96-bddb-b35306031ebc"). InnerVolumeSpecName "kube-api-access-2jprk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.101328 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b862adb6-7c39-4a96-bddb-b35306031ebc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b862adb6-7c39-4a96-bddb-b35306031ebc" (UID: "b862adb6-7c39-4a96-bddb-b35306031ebc"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.175216 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mt4xb" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.175298 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mt4xb" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.209366 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jprk\" (UniqueName: \"kubernetes.io/projected/b862adb6-7c39-4a96-bddb-b35306031ebc-kube-api-access-2jprk\") on node \"crc\" DevicePath \"\"" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.211468 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b862adb6-7c39-4a96-bddb-b35306031ebc-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.211592 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b862adb6-7c39-4a96-bddb-b35306031ebc-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.225567 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mt4xb" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.768416 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" event={"ID":"b862adb6-7c39-4a96-bddb-b35306031ebc","Type":"ContainerDied","Data":"dc85847ea8f6da3949792d573424fa204e967e2a2f013651f01a9cb643690a2b"} Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.768494 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc85847ea8f6da3949792d573424fa204e967e2a2f013651f01a9cb643690a2b" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.768447 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9" Jan 29 07:00:03 crc kubenswrapper[4861]: I0129 07:00:03.818212 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mt4xb" Jan 29 07:00:04 crc kubenswrapper[4861]: I0129 07:00:04.401159 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mt4xb"] Jan 29 07:00:04 crc kubenswrapper[4861]: I0129 07:00:04.779835 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4e98fb1-8453-4881-afea-142a9bcb40ac" containerID="a01a43e9d27f63ac68483a29d58aa72495d8649d5dc2f1027338270e9e5295e1" exitCode=0 Jan 29 07:00:04 crc kubenswrapper[4861]: I0129 07:00:04.780011 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb48n" event={"ID":"f4e98fb1-8453-4881-afea-142a9bcb40ac","Type":"ContainerDied","Data":"a01a43e9d27f63ac68483a29d58aa72495d8649d5dc2f1027338270e9e5295e1"} Jan 29 07:00:05 crc kubenswrapper[4861]: I0129 07:00:05.797410 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mt4xb" podUID="c57f3f4f-cad1-459a-87da-307f309751d9" containerName="registry-server" containerID="cri-o://55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e" gracePeriod=2 Jan 29 07:00:05 crc kubenswrapper[4861]: I0129 07:00:05.798685 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb48n" event={"ID":"f4e98fb1-8453-4881-afea-142a9bcb40ac","Type":"ContainerStarted","Data":"d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a"} Jan 29 07:00:05 crc kubenswrapper[4861]: I0129 07:00:05.826605 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wb48n" podStartSLOduration=2.355720486 podStartE2EDuration="5.826579091s" podCreationTimestamp="2026-01-29 07:00:00 +0000 UTC" firstStartedPulling="2026-01-29 07:00:01.750496151 +0000 UTC m=+1493.421990708" lastFinishedPulling="2026-01-29 07:00:05.221354726 +0000 UTC m=+1496.892849313" observedRunningTime="2026-01-29 07:00:05.821485908 +0000 UTC m=+1497.492980505" watchObservedRunningTime="2026-01-29 07:00:05.826579091 +0000 UTC m=+1497.498073668" Jan 29 07:00:05 crc kubenswrapper[4861]: E0129 07:00:05.971055 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc57f3f4f_cad1_459a_87da_307f309751d9.slice/crio-55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc57f3f4f_cad1_459a_87da_307f309751d9.slice/crio-conmon-55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e.scope\": RecentStats: unable to find data in memory cache]" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.201168 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mt4xb" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.356484 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-catalog-content\") pod \"c57f3f4f-cad1-459a-87da-307f309751d9\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.356788 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-utilities\") pod \"c57f3f4f-cad1-459a-87da-307f309751d9\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.356840 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7ccb\" (UniqueName: \"kubernetes.io/projected/c57f3f4f-cad1-459a-87da-307f309751d9-kube-api-access-c7ccb\") pod \"c57f3f4f-cad1-459a-87da-307f309751d9\" (UID: \"c57f3f4f-cad1-459a-87da-307f309751d9\") " Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.358755 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-utilities" (OuterVolumeSpecName: "utilities") pod "c57f3f4f-cad1-459a-87da-307f309751d9" (UID: "c57f3f4f-cad1-459a-87da-307f309751d9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.370628 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c57f3f4f-cad1-459a-87da-307f309751d9-kube-api-access-c7ccb" (OuterVolumeSpecName: "kube-api-access-c7ccb") pod "c57f3f4f-cad1-459a-87da-307f309751d9" (UID: "c57f3f4f-cad1-459a-87da-307f309751d9"). InnerVolumeSpecName "kube-api-access-c7ccb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.458639 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.458677 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7ccb\" (UniqueName: \"kubernetes.io/projected/c57f3f4f-cad1-459a-87da-307f309751d9-kube-api-access-c7ccb\") on node \"crc\" DevicePath \"\"" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.607011 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c57f3f4f-cad1-459a-87da-307f309751d9" (UID: "c57f3f4f-cad1-459a-87da-307f309751d9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.661350 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57f3f4f-cad1-459a-87da-307f309751d9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.818856 4861 generic.go:334] "Generic (PLEG): container finished" podID="c57f3f4f-cad1-459a-87da-307f309751d9" containerID="55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e" exitCode=0 Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.819937 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mt4xb" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.820792 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mt4xb" event={"ID":"c57f3f4f-cad1-459a-87da-307f309751d9","Type":"ContainerDied","Data":"55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e"} Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.820829 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mt4xb" event={"ID":"c57f3f4f-cad1-459a-87da-307f309751d9","Type":"ContainerDied","Data":"820100471d6d3f647744ed14b95f86709a02e62a0f1b3ae2df32df8c24e43d5b"} Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.820848 4861 scope.go:117] "RemoveContainer" containerID="55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.864578 4861 scope.go:117] "RemoveContainer" containerID="ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.865586 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mt4xb"] Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.870949 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mt4xb"] Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.892330 4861 scope.go:117] "RemoveContainer" containerID="fa83f83b0ef575123b95f3fe72ea01be7966ab4f946c5dc358bf59679bae5c81" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.917277 4861 scope.go:117] "RemoveContainer" containerID="55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e" Jan 29 07:00:06 crc kubenswrapper[4861]: E0129 07:00:06.917826 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e\": container with ID starting with 55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e not found: ID does not exist" containerID="55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.917958 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e"} err="failed to get container status \"55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e\": rpc error: code = NotFound desc = could not find container \"55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e\": container with ID starting with 55909653ec8486b8344d496d8311b25af7dd6e9da3a7aad1031c7cd828f72b0e not found: ID does not exist" Jan 29 
07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.918088 4861 scope.go:117] "RemoveContainer" containerID="ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919" Jan 29 07:00:06 crc kubenswrapper[4861]: E0129 07:00:06.920408 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919\": container with ID starting with ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919 not found: ID does not exist" containerID="ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.920452 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919"} err="failed to get container status \"ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919\": rpc error: code = NotFound desc = could not find container \"ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919\": container with ID starting with ad89847fc8b9cbb47eaf2ab5f2bb60de36f2cccf46c881bd7cf2487b17842919 not found: ID does not exist" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.920495 4861 scope.go:117] "RemoveContainer" containerID="fa83f83b0ef575123b95f3fe72ea01be7966ab4f946c5dc358bf59679bae5c81" Jan 29 07:00:06 crc kubenswrapper[4861]: E0129 07:00:06.921396 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa83f83b0ef575123b95f3fe72ea01be7966ab4f946c5dc358bf59679bae5c81\": container with ID starting with fa83f83b0ef575123b95f3fe72ea01be7966ab4f946c5dc358bf59679bae5c81 not found: ID does not exist" containerID="fa83f83b0ef575123b95f3fe72ea01be7966ab4f946c5dc358bf59679bae5c81" Jan 29 07:00:06 crc kubenswrapper[4861]: I0129 07:00:06.921426 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa83f83b0ef575123b95f3fe72ea01be7966ab4f946c5dc358bf59679bae5c81"} err="failed to get container status \"fa83f83b0ef575123b95f3fe72ea01be7966ab4f946c5dc358bf59679bae5c81\": rpc error: code = NotFound desc = could not find container \"fa83f83b0ef575123b95f3fe72ea01be7966ab4f946c5dc358bf59679bae5c81\": container with ID starting with fa83f83b0ef575123b95f3fe72ea01be7966ab4f946c5dc358bf59679bae5c81 not found: ID does not exist" Jan 29 07:00:07 crc kubenswrapper[4861]: I0129 07:00:07.125585 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c57f3f4f-cad1-459a-87da-307f309751d9" path="/var/lib/kubelet/pods/c57f3f4f-cad1-459a-87da-307f309751d9/volumes" Jan 29 07:00:10 crc kubenswrapper[4861]: I0129 07:00:10.526489 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:10 crc kubenswrapper[4861]: I0129 07:00:10.527211 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:10 crc kubenswrapper[4861]: I0129 07:00:10.602379 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:10 crc kubenswrapper[4861]: I0129 07:00:10.910540 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:11 crc kubenswrapper[4861]: I0129 07:00:11.798684 4861 
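
The RemoveContainer and "ContainerStatus from runtime service failed" pairs above show the cleanup path being idempotent: once cri-o has already deleted a container, status lookups return gRPC NotFound, the kubelet logs "DeleteContainer returned error", and cleanup continues as if the removal had succeeded. A sketch of that tolerate-NotFound pattern (simplified; errNotFound stands in for the runtime's gRPC NotFound status):

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the NotFound responses seen in the log.
var errNotFound = errors.New("container not found")

// removeContainer treats NotFound as success: a container that is
// already gone does not need removing, so cleanup can proceed.
func removeContainer(id string, remove func(string) error) error {
	err := remove(id)
	if errors.Is(err, errNotFound) {
		fmt.Printf("container %s already removed, ignoring NotFound\n", id)
		return nil
	}
	return err
}

func main() {
	gone := func(string) error { return errNotFound }
	fmt.Println(removeContainer("55909653ec84", gone)) // <nil>
}

Treating NotFound as success is what keeps repeated SyncLoop DELETE and REMOVE passes from wedging on containers the runtime has already garbage-collected.
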
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wb48n"] Jan 29 07:00:12 crc kubenswrapper[4861]: I0129 07:00:12.874387 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wb48n" podUID="f4e98fb1-8453-4881-afea-142a9bcb40ac" containerName="registry-server" containerID="cri-o://d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a" gracePeriod=2 Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.285407 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.368504 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-catalog-content\") pod \"f4e98fb1-8453-4881-afea-142a9bcb40ac\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.368571 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-utilities\") pod \"f4e98fb1-8453-4881-afea-142a9bcb40ac\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.369533 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-utilities" (OuterVolumeSpecName: "utilities") pod "f4e98fb1-8453-4881-afea-142a9bcb40ac" (UID: "f4e98fb1-8453-4881-afea-142a9bcb40ac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.398567 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f4e98fb1-8453-4881-afea-142a9bcb40ac" (UID: "f4e98fb1-8453-4881-afea-142a9bcb40ac"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.470056 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shkm8\" (UniqueName: \"kubernetes.io/projected/f4e98fb1-8453-4881-afea-142a9bcb40ac-kube-api-access-shkm8\") pod \"f4e98fb1-8453-4881-afea-142a9bcb40ac\" (UID: \"f4e98fb1-8453-4881-afea-142a9bcb40ac\") " Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.470469 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.470490 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e98fb1-8453-4881-afea-142a9bcb40ac-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.477413 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4e98fb1-8453-4881-afea-142a9bcb40ac-kube-api-access-shkm8" (OuterVolumeSpecName: "kube-api-access-shkm8") pod "f4e98fb1-8453-4881-afea-142a9bcb40ac" (UID: "f4e98fb1-8453-4881-afea-142a9bcb40ac"). InnerVolumeSpecName "kube-api-access-shkm8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.571785 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shkm8\" (UniqueName: \"kubernetes.io/projected/f4e98fb1-8453-4881-afea-142a9bcb40ac-kube-api-access-shkm8\") on node \"crc\" DevicePath \"\"" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.889550 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4e98fb1-8453-4881-afea-142a9bcb40ac" containerID="d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a" exitCode=0 Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.889624 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb48n" event={"ID":"f4e98fb1-8453-4881-afea-142a9bcb40ac","Type":"ContainerDied","Data":"d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a"} Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.889636 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wb48n" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.889677 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb48n" event={"ID":"f4e98fb1-8453-4881-afea-142a9bcb40ac","Type":"ContainerDied","Data":"1b4a578238c555e00f49a4442caeeebe2c57c35ee512b3aa2d0fa67626087b99"} Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.889708 4861 scope.go:117] "RemoveContainer" containerID="d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.917365 4861 scope.go:117] "RemoveContainer" containerID="a01a43e9d27f63ac68483a29d58aa72495d8649d5dc2f1027338270e9e5295e1" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.936675 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wb48n"] Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.945374 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wb48n"] Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.959785 4861 scope.go:117] "RemoveContainer" containerID="752316fad903bf6bef800aa7ff1f2771d1d5612299d88d97481e9c8232320cd5" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.990229 4861 scope.go:117] "RemoveContainer" containerID="d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a" Jan 29 07:00:13 crc kubenswrapper[4861]: E0129 07:00:13.993775 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a\": container with ID starting with d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a not found: ID does not exist" containerID="d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.993825 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a"} err="failed to get container status \"d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a\": rpc error: code = NotFound desc = could not find container \"d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a\": container with ID starting with d4e2cfb377573beed751b026eaad4bcb113cf998e53bb081b581b433d72ef77a not found: ID does not exist" Jan 
29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.993851 4861 scope.go:117] "RemoveContainer" containerID="a01a43e9d27f63ac68483a29d58aa72495d8649d5dc2f1027338270e9e5295e1" Jan 29 07:00:13 crc kubenswrapper[4861]: E0129 07:00:13.994252 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a01a43e9d27f63ac68483a29d58aa72495d8649d5dc2f1027338270e9e5295e1\": container with ID starting with a01a43e9d27f63ac68483a29d58aa72495d8649d5dc2f1027338270e9e5295e1 not found: ID does not exist" containerID="a01a43e9d27f63ac68483a29d58aa72495d8649d5dc2f1027338270e9e5295e1" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.994286 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a01a43e9d27f63ac68483a29d58aa72495d8649d5dc2f1027338270e9e5295e1"} err="failed to get container status \"a01a43e9d27f63ac68483a29d58aa72495d8649d5dc2f1027338270e9e5295e1\": rpc error: code = NotFound desc = could not find container \"a01a43e9d27f63ac68483a29d58aa72495d8649d5dc2f1027338270e9e5295e1\": container with ID starting with a01a43e9d27f63ac68483a29d58aa72495d8649d5dc2f1027338270e9e5295e1 not found: ID does not exist" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.994304 4861 scope.go:117] "RemoveContainer" containerID="752316fad903bf6bef800aa7ff1f2771d1d5612299d88d97481e9c8232320cd5" Jan 29 07:00:13 crc kubenswrapper[4861]: E0129 07:00:13.994626 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"752316fad903bf6bef800aa7ff1f2771d1d5612299d88d97481e9c8232320cd5\": container with ID starting with 752316fad903bf6bef800aa7ff1f2771d1d5612299d88d97481e9c8232320cd5 not found: ID does not exist" containerID="752316fad903bf6bef800aa7ff1f2771d1d5612299d88d97481e9c8232320cd5" Jan 29 07:00:13 crc kubenswrapper[4861]: I0129 07:00:13.994682 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"752316fad903bf6bef800aa7ff1f2771d1d5612299d88d97481e9c8232320cd5"} err="failed to get container status \"752316fad903bf6bef800aa7ff1f2771d1d5612299d88d97481e9c8232320cd5\": rpc error: code = NotFound desc = could not find container \"752316fad903bf6bef800aa7ff1f2771d1d5612299d88d97481e9c8232320cd5\": container with ID starting with 752316fad903bf6bef800aa7ff1f2771d1d5612299d88d97481e9c8232320cd5 not found: ID does not exist" Jan 29 07:00:15 crc kubenswrapper[4861]: I0129 07:00:15.129127 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4e98fb1-8453-4881-afea-142a9bcb40ac" path="/var/lib/kubelet/pods/f4e98fb1-8453-4881-afea-142a9bcb40ac/volumes" Jan 29 07:00:21 crc kubenswrapper[4861]: I0129 07:00:21.893440 4861 scope.go:117] "RemoveContainer" containerID="eb5b13bfbfc3b8b7eee2ed233fc25bde57155c21687160ab5efe54426a560771" Jan 29 07:00:21 crc kubenswrapper[4861]: I0129 07:00:21.934158 4861 scope.go:117] "RemoveContainer" containerID="015f72c114bfd8ca01ff83fcfd7253c5311da2c4dbfdaa591c7feb5e53a0693d" Jan 29 07:00:21 crc kubenswrapper[4861]: I0129 07:00:21.971125 4861 scope.go:117] "RemoveContainer" containerID="7438af59a13cf43dd170a4e1157286f11336dd7d53efd066618939d16c7a2b84" Jan 29 07:00:22 crc kubenswrapper[4861]: I0129 07:00:22.005717 4861 scope.go:117] "RemoveContainer" containerID="4156976b77fcc59e40ba688e6dad758e650f38eff0bf2ea70256a0007e7bb53f" Jan 29 07:00:22 crc kubenswrapper[4861]: I0129 07:00:22.031437 4861 scope.go:117] "RemoveContainer" 
containerID="44cae61274ae498ec0e36b040cd3c6a823e229c9ed4442ae55a7ad53836e9dd2" Jan 29 07:00:22 crc kubenswrapper[4861]: I0129 07:00:22.061237 4861 scope.go:117] "RemoveContainer" containerID="a2c261703bc18ccdd7760e2b862c9a0b2f7a70eb7e01b0defc65c4d41b28a4a7" Jan 29 07:00:22 crc kubenswrapper[4861]: I0129 07:00:22.089999 4861 scope.go:117] "RemoveContainer" containerID="82d4301c7f8e1b6d25f3d60567395a1ff2635c17934b75e63917065ada770d83" Jan 29 07:00:22 crc kubenswrapper[4861]: I0129 07:00:22.106046 4861 scope.go:117] "RemoveContainer" containerID="2edde80755c98a48c8d503470908a51b138482e45d8b8affe30999cf1fe693fe" Jan 29 07:00:22 crc kubenswrapper[4861]: I0129 07:00:22.128925 4861 scope.go:117] "RemoveContainer" containerID="db29cf3498e6242cc345136e8b4ef9c1ff58fae25025e65ba05baf083fa22586" Jan 29 07:00:22 crc kubenswrapper[4861]: I0129 07:00:22.146133 4861 scope.go:117] "RemoveContainer" containerID="cb513fa4a3eb6b1c3c04f73daa8fa7574de85c7d15fbf53dfe97301e93d37653" Jan 29 07:00:22 crc kubenswrapper[4861]: I0129 07:00:22.166911 4861 scope.go:117] "RemoveContainer" containerID="f3570debc62991c7c1fa0d77b1dc28def8d437df67468235887383ae41cd48d3" Jan 29 07:00:30 crc kubenswrapper[4861]: I0129 07:00:30.630320 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:00:30 crc kubenswrapper[4861]: I0129 07:00:30.630923 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:00:30 crc kubenswrapper[4861]: I0129 07:00:30.630979 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 07:00:30 crc kubenswrapper[4861]: I0129 07:00:30.631724 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 07:00:30 crc kubenswrapper[4861]: I0129 07:00:30.631825 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" gracePeriod=600 Jan 29 07:00:30 crc kubenswrapper[4861]: E0129 07:00:30.761909 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:00:31 crc kubenswrapper[4861]: I0129 07:00:31.064165 4861 generic.go:334] "Generic (PLEG): container finished" 
podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" exitCode=0 Jan 29 07:00:31 crc kubenswrapper[4861]: I0129 07:00:31.064249 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a"} Jan 29 07:00:31 crc kubenswrapper[4861]: I0129 07:00:31.064338 4861 scope.go:117] "RemoveContainer" containerID="85000e70e0f61206c55ed7e3495b90975c6a190d05beb488bbd436b08d076e87" Jan 29 07:00:31 crc kubenswrapper[4861]: I0129 07:00:31.065137 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:00:31 crc kubenswrapper[4861]: E0129 07:00:31.065627 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:00:44 crc kubenswrapper[4861]: I0129 07:00:44.116286 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:00:44 crc kubenswrapper[4861]: E0129 07:00:44.116951 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:00:55 crc kubenswrapper[4861]: I0129 07:00:55.117201 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:00:55 crc kubenswrapper[4861]: E0129 07:00:55.118127 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:01:10 crc kubenswrapper[4861]: I0129 07:01:10.116785 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:01:10 crc kubenswrapper[4861]: E0129 07:01:10.117630 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.348763 4861 scope.go:117] "RemoveContainer" containerID="4d253aba4ac2e77f20e972fc48cc6c3ae8fe5652146893e2a09943dcbeb6f768" Jan 29 07:01:22 crc 
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.383611 4861 scope.go:117] "RemoveContainer" containerID="f5ef15ca4fe37880a59d8aaf819b8144f64d194a16bcac94f22c457fa5014ff8"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.426152 4861 scope.go:117] "RemoveContainer" containerID="03fe6aeb6f9d7cf900046e36d87fe35f4d2343ed4395de8376bddce3ef4f9aad"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.458291 4861 scope.go:117] "RemoveContainer" containerID="c370cf25d946e29524a08b09bbd2c770d4dc22a304494f564e23437ef233b062"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.490666 4861 scope.go:117] "RemoveContainer" containerID="76374b1f353d893fdc3069b1615d67deeb913b0c2768471ba2baee2208611a03"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.514748 4861 scope.go:117] "RemoveContainer" containerID="e80f9259bf11bb94852f82dccdace8a12c0dd218cca38f8e88d6fe6d72cabee2"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.559167 4861 scope.go:117] "RemoveContainer" containerID="3412f37a50f007d599dc119840663f4df1024fd44f7e0620807159235732c151"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.580630 4861 scope.go:117] "RemoveContainer" containerID="b54a873352b23c17ed2e2e12b008503e125d8e142b83ddde0068a40136cd6a56"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.613430 4861 scope.go:117] "RemoveContainer" containerID="c3d836d7ff030fca36739ea9a5f282a8be67bbbe4c8a440b3b995a0d0c5dc317"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.646174 4861 scope.go:117] "RemoveContainer" containerID="1032eb6199237465104797eee73e2d72aa2b935e6b067c7d59fbd6d04743a636"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.677234 4861 scope.go:117] "RemoveContainer" containerID="ba7f7b8aa0a69ef1edc92b25a04e8ad86840de8d862f70596abc49240736dd9c"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.729706 4861 scope.go:117] "RemoveContainer" containerID="7691ae12746c8e5b7624749ffb2b6ab9a340077db2194de6534ca14f13adf6b7"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.764832 4861 scope.go:117] "RemoveContainer" containerID="39b1268ad20f3405e1e31dcaa8ad34a915ba32a7d48ac681513976c40074ca2f"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.800602 4861 scope.go:117] "RemoveContainer" containerID="a3ba03aed1e2eed08ca2c4db3d49eda7c8351432864bca739efc9fef03b3696d"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.826065 4861 scope.go:117] "RemoveContainer" containerID="048dbf5066137bb4e571a0ddf6f23cd5da6fb05d04084cbec4c293105909f316"
Jan 29 07:01:22 crc kubenswrapper[4861]: I0129 07:01:22.854106 4861 scope.go:117] "RemoveContainer" containerID="2aa3ba3724453a514e3901ddec1d2c614029a3617223ce7a100d3128aa058a01"
Jan 29 07:01:25 crc kubenswrapper[4861]: I0129 07:01:25.116832 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a"
Jan 29 07:01:25 crc kubenswrapper[4861]: E0129 07:01:25.117821 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.430884 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-26sk7"]
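The burst of "RemoveContainer" records at 07:01:22 (and the similar bursts at 07:02:23 and 07:03:23 below) is the kubelet's periodic garbage collection deleting exited containers their pods no longer need. A minimal sketch of an age-based sweep, with hypothetical types; the real GC also honors per-pod and global count limits:

    package main

    import (
        "fmt"
        "time"
    )

    // deadContainer is a hypothetical record of an exited container.
    type deadContainer struct {
        id       string
        finished time.Time
    }

    // sweep returns containers dead longer than minAge; the kubelet would
    // issue a CRI RemoveContainer for each, as in the bursts above.
    func sweep(dead []deadContainer, minAge time.Duration, now time.Time) []string {
        var removed []string
        for _, c := range dead {
            if now.Sub(c.finished) >= minAge {
                removed = append(removed, c.id)
            }
        }
        return removed
    }

    func main() {
        now := time.Now()
        dead := []deadContainer{
            {"4d253aba", now.Add(-2 * time.Minute)},
            {"f5ef15ca", now.Add(-10 * time.Second)},
        }
        fmt.Println(sweep(dead, time.Minute, now)) // [4d253aba]
    }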
Jan 29 07:01:28 crc kubenswrapper[4861]: E0129 07:01:28.431723 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e98fb1-8453-4881-afea-142a9bcb40ac" containerName="extract-content"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.431740 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e98fb1-8453-4881-afea-142a9bcb40ac" containerName="extract-content"
Jan 29 07:01:28 crc kubenswrapper[4861]: E0129 07:01:28.431751 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57f3f4f-cad1-459a-87da-307f309751d9" containerName="extract-utilities"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.431759 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57f3f4f-cad1-459a-87da-307f309751d9" containerName="extract-utilities"
Jan 29 07:01:28 crc kubenswrapper[4861]: E0129 07:01:28.431773 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b862adb6-7c39-4a96-bddb-b35306031ebc" containerName="collect-profiles"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.431780 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b862adb6-7c39-4a96-bddb-b35306031ebc" containerName="collect-profiles"
Jan 29 07:01:28 crc kubenswrapper[4861]: E0129 07:01:28.431796 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e98fb1-8453-4881-afea-142a9bcb40ac" containerName="registry-server"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.431803 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e98fb1-8453-4881-afea-142a9bcb40ac" containerName="registry-server"
Jan 29 07:01:28 crc kubenswrapper[4861]: E0129 07:01:28.431822 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e98fb1-8453-4881-afea-142a9bcb40ac" containerName="extract-utilities"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.431829 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e98fb1-8453-4881-afea-142a9bcb40ac" containerName="extract-utilities"
Jan 29 07:01:28 crc kubenswrapper[4861]: E0129 07:01:28.431844 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57f3f4f-cad1-459a-87da-307f309751d9" containerName="extract-content"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.431850 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57f3f4f-cad1-459a-87da-307f309751d9" containerName="extract-content"
Jan 29 07:01:28 crc kubenswrapper[4861]: E0129 07:01:28.431871 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57f3f4f-cad1-459a-87da-307f309751d9" containerName="registry-server"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.431876 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57f3f4f-cad1-459a-87da-307f309751d9" containerName="registry-server"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.432022 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4e98fb1-8453-4881-afea-142a9bcb40ac" containerName="registry-server"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.432040 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57f3f4f-cad1-459a-87da-307f309751d9" containerName="registry-server"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.432051 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b862adb6-7c39-4a96-bddb-b35306031ebc" containerName="collect-profiles"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.433240 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-26sk7"
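When the new certified-operators-26sk7 pod is admitted, the CPU and memory managers first drop per-container state left behind by pods that no longer exist, which is what the "RemoveStaleState" / "Deleted CPUSet assignment" pairs above record. A sketch of that reconciliation, assuming state keyed by (podUID, containerName):

    package main

    import "fmt"

    type key struct{ podUID, container string }

    // removeStaleState drops assignments whose pod is no longer active,
    // as the cpu_manager/state_mem records above do for the old
    // f4e98fb1... and c57f3f4f... pods.
    func removeStaleState(assignments map[key]string, active map[string]bool) {
        for k := range assignments {
            if !active[k.podUID] {
                fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", k.podUID, k.container)
                delete(assignments, k) // deleting during range is safe in Go
            }
        }
    }

    func main() {
        assignments := map[key]string{
            {"f4e98fb1", "registry-server"}: "cpus 2-3",
            {"live-pod", "app"}:             "cpus 0-1",
        }
        removeStaleState(assignments, map[string]bool{"live-pod": true})
        fmt.Println(len(assignments)) // 1
    }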
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.444332 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-26sk7"]
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.574155 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-catalog-content\") pod \"certified-operators-26sk7\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") " pod="openshift-marketplace/certified-operators-26sk7"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.574222 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jr8tj\" (UniqueName: \"kubernetes.io/projected/9403f2a5-4496-4a55-b02d-e439af3c7428-kube-api-access-jr8tj\") pod \"certified-operators-26sk7\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") " pod="openshift-marketplace/certified-operators-26sk7"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.574312 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-utilities\") pod \"certified-operators-26sk7\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") " pod="openshift-marketplace/certified-operators-26sk7"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.675588 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-catalog-content\") pod \"certified-operators-26sk7\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") " pod="openshift-marketplace/certified-operators-26sk7"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.675640 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jr8tj\" (UniqueName: \"kubernetes.io/projected/9403f2a5-4496-4a55-b02d-e439af3c7428-kube-api-access-jr8tj\") pod \"certified-operators-26sk7\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") " pod="openshift-marketplace/certified-operators-26sk7"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.675710 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-utilities\") pod \"certified-operators-26sk7\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") " pod="openshift-marketplace/certified-operators-26sk7"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.676280 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-catalog-content\") pod \"certified-operators-26sk7\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") " pod="openshift-marketplace/certified-operators-26sk7"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.676307 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-utilities\") pod \"certified-operators-26sk7\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") " pod="openshift-marketplace/certified-operators-26sk7"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.698026 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jr8tj\" (UniqueName: \"kubernetes.io/projected/9403f2a5-4496-4a55-b02d-e439af3c7428-kube-api-access-jr8tj\") pod \"certified-operators-26sk7\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") " pod="openshift-marketplace/certified-operators-26sk7"
Jan 29 07:01:28 crc kubenswrapper[4861]: I0129 07:01:28.766298 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-26sk7"
Jan 29 07:01:29 crc kubenswrapper[4861]: I0129 07:01:29.261463 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-26sk7"]
Jan 29 07:01:29 crc kubenswrapper[4861]: I0129 07:01:29.831817 4861 generic.go:334] "Generic (PLEG): container finished" podID="9403f2a5-4496-4a55-b02d-e439af3c7428" containerID="98232ce3eb6ba6b6f24061b31dc801a28cf8666c781ef903e6d5db311dc81d37" exitCode=0
Jan 29 07:01:29 crc kubenswrapper[4861]: I0129 07:01:29.831930 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-26sk7" event={"ID":"9403f2a5-4496-4a55-b02d-e439af3c7428","Type":"ContainerDied","Data":"98232ce3eb6ba6b6f24061b31dc801a28cf8666c781ef903e6d5db311dc81d37"}
Jan 29 07:01:29 crc kubenswrapper[4861]: I0129 07:01:29.832356 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-26sk7" event={"ID":"9403f2a5-4496-4a55-b02d-e439af3c7428","Type":"ContainerStarted","Data":"d08cfad03de3e5b5380480672d1e3282870a484acaeccc752a1456493ddd01bb"}
Jan 29 07:01:30 crc kubenswrapper[4861]: I0129 07:01:30.843567 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-26sk7" event={"ID":"9403f2a5-4496-4a55-b02d-e439af3c7428","Type":"ContainerStarted","Data":"07bd65d3ad5a6aef4a4a8d608a849eb6bf5f7cbdfa6bab8ce785f3786b546689"}
Jan 29 07:01:31 crc kubenswrapper[4861]: I0129 07:01:31.852960 4861 generic.go:334] "Generic (PLEG): container finished" podID="9403f2a5-4496-4a55-b02d-e439af3c7428" containerID="07bd65d3ad5a6aef4a4a8d608a849eb6bf5f7cbdfa6bab8ce785f3786b546689" exitCode=0
Jan 29 07:01:31 crc kubenswrapper[4861]: I0129 07:01:31.853022 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-26sk7" event={"ID":"9403f2a5-4496-4a55-b02d-e439af3c7428","Type":"ContainerDied","Data":"07bd65d3ad5a6aef4a4a8d608a849eb6bf5f7cbdfa6bab8ce785f3786b546689"}
Jan 29 07:01:33 crc kubenswrapper[4861]: I0129 07:01:33.877423 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-26sk7" event={"ID":"9403f2a5-4496-4a55-b02d-e439af3c7428","Type":"ContainerStarted","Data":"9592eb3efd7768db6602e8524eb5daadff9b6a75e0c9974fb0f5c51d5e98b4c4"}
Jan 29 07:01:33 crc kubenswrapper[4861]: I0129 07:01:33.902606 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-26sk7" podStartSLOduration=2.465401489 podStartE2EDuration="5.902575212s" podCreationTimestamp="2026-01-29 07:01:28 +0000 UTC" firstStartedPulling="2026-01-29 07:01:29.834004968 +0000 UTC m=+1581.505499565" lastFinishedPulling="2026-01-29 07:01:33.271178731 +0000 UTC m=+1584.942673288" observedRunningTime="2026-01-29 07:01:33.89372716 +0000 UTC m=+1585.565221757" watchObservedRunningTime="2026-01-29 07:01:33.902575212 +0000 UTC m=+1585.574069809"
status="unhealthy" pod="openshift-marketplace/certified-operators-26sk7" Jan 29 07:01:38 crc kubenswrapper[4861]: I0129 07:01:38.768128 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-26sk7" Jan 29 07:01:38 crc kubenswrapper[4861]: I0129 07:01:38.843105 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-26sk7" Jan 29 07:01:38 crc kubenswrapper[4861]: I0129 07:01:38.993577 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-26sk7" Jan 29 07:01:39 crc kubenswrapper[4861]: I0129 07:01:39.098409 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-26sk7"] Jan 29 07:01:40 crc kubenswrapper[4861]: I0129 07:01:40.118658 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:01:40 crc kubenswrapper[4861]: E0129 07:01:40.119012 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:01:40 crc kubenswrapper[4861]: I0129 07:01:40.945715 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-26sk7" podUID="9403f2a5-4496-4a55-b02d-e439af3c7428" containerName="registry-server" containerID="cri-o://9592eb3efd7768db6602e8524eb5daadff9b6a75e0c9974fb0f5c51d5e98b4c4" gracePeriod=2 Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.007205 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-26sk7_9403f2a5-4496-4a55-b02d-e439af3c7428/registry-server/0.log" Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.008590 4861 generic.go:334] "Generic (PLEG): container finished" podID="9403f2a5-4496-4a55-b02d-e439af3c7428" containerID="9592eb3efd7768db6602e8524eb5daadff9b6a75e0c9974fb0f5c51d5e98b4c4" exitCode=137 Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.008645 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-26sk7" event={"ID":"9403f2a5-4496-4a55-b02d-e439af3c7428","Type":"ContainerDied","Data":"9592eb3efd7768db6602e8524eb5daadff9b6a75e0c9974fb0f5c51d5e98b4c4"} Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.088982 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-26sk7_9403f2a5-4496-4a55-b02d-e439af3c7428/registry-server/0.log" Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.090295 4861 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.090295 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-26sk7"
Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.175278 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jr8tj\" (UniqueName: \"kubernetes.io/projected/9403f2a5-4496-4a55-b02d-e439af3c7428-kube-api-access-jr8tj\") pod \"9403f2a5-4496-4a55-b02d-e439af3c7428\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") "
Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.175349 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-utilities\") pod \"9403f2a5-4496-4a55-b02d-e439af3c7428\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") "
Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.175386 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-catalog-content\") pod \"9403f2a5-4496-4a55-b02d-e439af3c7428\" (UID: \"9403f2a5-4496-4a55-b02d-e439af3c7428\") "
Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.176542 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-utilities" (OuterVolumeSpecName: "utilities") pod "9403f2a5-4496-4a55-b02d-e439af3c7428" (UID: "9403f2a5-4496-4a55-b02d-e439af3c7428"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.184272 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9403f2a5-4496-4a55-b02d-e439af3c7428-kube-api-access-jr8tj" (OuterVolumeSpecName: "kube-api-access-jr8tj") pod "9403f2a5-4496-4a55-b02d-e439af3c7428" (UID: "9403f2a5-4496-4a55-b02d-e439af3c7428"). InnerVolumeSpecName "kube-api-access-jr8tj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.222844 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9403f2a5-4496-4a55-b02d-e439af3c7428" (UID: "9403f2a5-4496-4a55-b02d-e439af3c7428"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
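The UnmountVolume/TearDown records above are the volume manager's reconciler diffing desired state (what running pods need mounted) against actual state (what is mounted) and tearing down what is no longer desired; the MountVolume records earlier in the log are the same loop running in the other direction. A minimal sketch of that diff, with hypothetical types:

    package main

    import "fmt"

    // reconcile unmounts volumes that are mounted but no longer desired and
    // mounts volumes that are desired but not yet mounted -- the two
    // directions seen as UnmountVolume/MountVolume records in the log.
    func reconcile(desired, actual map[string]bool) (mount, unmount []string) {
        for v := range actual {
            if !desired[v] {
                unmount = append(unmount, v)
            }
        }
        for v := range desired {
            if !actual[v] {
                mount = append(mount, v)
            }
        }
        return
    }

    func main() {
        // After the pod deletion above, nothing is desired any more.
        desired := map[string]bool{}
        actual := map[string]bool{"utilities": true, "catalog-content": true, "kube-api-access-jr8tj": true}
        mount, unmount := reconcile(desired, actual)
        fmt.Println(mount, unmount) // [] and all three volumes, in map order
    }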
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.277996 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jr8tj\" (UniqueName: \"kubernetes.io/projected/9403f2a5-4496-4a55-b02d-e439af3c7428-kube-api-access-jr8tj\") on node \"crc\" DevicePath \"\"" Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.278041 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:01:47 crc kubenswrapper[4861]: I0129 07:01:47.278056 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9403f2a5-4496-4a55-b02d-e439af3c7428-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:01:48 crc kubenswrapper[4861]: I0129 07:01:48.016638 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-26sk7_9403f2a5-4496-4a55-b02d-e439af3c7428/registry-server/0.log" Jan 29 07:01:48 crc kubenswrapper[4861]: I0129 07:01:48.017976 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-26sk7" event={"ID":"9403f2a5-4496-4a55-b02d-e439af3c7428","Type":"ContainerDied","Data":"d08cfad03de3e5b5380480672d1e3282870a484acaeccc752a1456493ddd01bb"} Jan 29 07:01:48 crc kubenswrapper[4861]: I0129 07:01:48.018031 4861 scope.go:117] "RemoveContainer" containerID="9592eb3efd7768db6602e8524eb5daadff9b6a75e0c9974fb0f5c51d5e98b4c4" Jan 29 07:01:48 crc kubenswrapper[4861]: I0129 07:01:48.018050 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-26sk7" Jan 29 07:01:48 crc kubenswrapper[4861]: I0129 07:01:48.048093 4861 scope.go:117] "RemoveContainer" containerID="07bd65d3ad5a6aef4a4a8d608a849eb6bf5f7cbdfa6bab8ce785f3786b546689" Jan 29 07:01:48 crc kubenswrapper[4861]: I0129 07:01:48.055278 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-26sk7"] Jan 29 07:01:48 crc kubenswrapper[4861]: I0129 07:01:48.064878 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-26sk7"] Jan 29 07:01:48 crc kubenswrapper[4861]: I0129 07:01:48.081335 4861 scope.go:117] "RemoveContainer" containerID="98232ce3eb6ba6b6f24061b31dc801a28cf8666c781ef903e6d5db311dc81d37" Jan 29 07:01:49 crc kubenswrapper[4861]: I0129 07:01:49.123704 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9403f2a5-4496-4a55-b02d-e439af3c7428" path="/var/lib/kubelet/pods/9403f2a5-4496-4a55-b02d-e439af3c7428/volumes" Jan 29 07:01:54 crc kubenswrapper[4861]: I0129 07:01:54.117185 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:01:54 crc kubenswrapper[4861]: E0129 07:01:54.117953 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:02:06 crc kubenswrapper[4861]: I0129 07:02:06.116675 4861 scope.go:117] "RemoveContainer" 
containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:02:06 crc kubenswrapper[4861]: E0129 07:02:06.117382 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:02:18 crc kubenswrapper[4861]: I0129 07:02:18.116720 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:02:18 crc kubenswrapper[4861]: E0129 07:02:18.117151 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:02:23 crc kubenswrapper[4861]: I0129 07:02:23.102547 4861 scope.go:117] "RemoveContainer" containerID="9fd75cdc74ab09059f227b09ddff0f8e7c83f3521d26bb3444075ede07ae852b" Jan 29 07:02:23 crc kubenswrapper[4861]: I0129 07:02:23.129605 4861 scope.go:117] "RemoveContainer" containerID="1e634fdcf03c8f53282b290c793d6b040cffed4dbe42c8934c1ba276df36ae3b" Jan 29 07:02:23 crc kubenswrapper[4861]: I0129 07:02:23.197467 4861 scope.go:117] "RemoveContainer" containerID="cebbe28835a9e678bb4753a3aaa2c633f2bc639bbdc370c3518908db12d571a1" Jan 29 07:02:23 crc kubenswrapper[4861]: I0129 07:02:23.238577 4861 scope.go:117] "RemoveContainer" containerID="fd9675094dbc5e4671db27d2d11399c30b5682eca9316dcb9802ca14217ef8f4" Jan 29 07:02:23 crc kubenswrapper[4861]: I0129 07:02:23.254187 4861 scope.go:117] "RemoveContainer" containerID="9f6be3de9ba208665f85ce4d4db605cb993a64e30c2100a8a6eccfa07d1e0a8f" Jan 29 07:02:23 crc kubenswrapper[4861]: I0129 07:02:23.277896 4861 scope.go:117] "RemoveContainer" containerID="a5f64eec477c12ec38ba2fac8b31e70090fd279bc5e09a3440fc3328dc16aa74" Jan 29 07:02:23 crc kubenswrapper[4861]: I0129 07:02:23.296845 4861 scope.go:117] "RemoveContainer" containerID="7508e3d4514c3d722dddc4711161afe75d06331d15db9417baef8bca8f91efbe" Jan 29 07:02:23 crc kubenswrapper[4861]: I0129 07:02:23.324614 4861 scope.go:117] "RemoveContainer" containerID="201cdbdb2df08c9c35f6540911640e71c10ebe7ff1718cb7caf09db3f61c1c95" Jan 29 07:02:23 crc kubenswrapper[4861]: I0129 07:02:23.345637 4861 scope.go:117] "RemoveContainer" containerID="fe7450ba0e402da5c2f5d7e5d0760f7d1e0f5894d9b62a87940fe5d055af49f2" Jan 29 07:02:31 crc kubenswrapper[4861]: I0129 07:02:31.117022 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:02:31 crc kubenswrapper[4861]: E0129 07:02:31.118060 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:02:42 crc 
kubenswrapper[4861]: I0129 07:02:42.116567 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:02:42 crc kubenswrapper[4861]: E0129 07:02:42.117657 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:02:57 crc kubenswrapper[4861]: I0129 07:02:57.116324 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:02:57 crc kubenswrapper[4861]: E0129 07:02:57.117194 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:03:11 crc kubenswrapper[4861]: I0129 07:03:11.117627 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:03:11 crc kubenswrapper[4861]: E0129 07:03:11.119016 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:03:23 crc kubenswrapper[4861]: I0129 07:03:23.508355 4861 scope.go:117] "RemoveContainer" containerID="2f518cab156f62fbd5298f70524670678e1c98100d3ecb6f8936540f50df39da" Jan 29 07:03:23 crc kubenswrapper[4861]: I0129 07:03:23.555514 4861 scope.go:117] "RemoveContainer" containerID="4b5361eb4f97b1f0c0a43cdfc8bba3497eb615870aae7ed31b56818124abc7f5" Jan 29 07:03:23 crc kubenswrapper[4861]: I0129 07:03:23.590225 4861 scope.go:117] "RemoveContainer" containerID="6756abe8a1b0340d3aa8c881cb242ac677f7ceda5ed061f5774b08d04e550a63" Jan 29 07:03:23 crc kubenswrapper[4861]: I0129 07:03:23.625726 4861 scope.go:117] "RemoveContainer" containerID="2578f711dc2641e8fba1a7313e4c5059d379b749e689eba9c1b05fac506fd409" Jan 29 07:03:23 crc kubenswrapper[4861]: I0129 07:03:23.644850 4861 scope.go:117] "RemoveContainer" containerID="1e34d07e14aab00e720ca02ec07f36951679f3fedc5ba47bee1c985e8b91e13e" Jan 29 07:03:23 crc kubenswrapper[4861]: I0129 07:03:23.667101 4861 scope.go:117] "RemoveContainer" containerID="331e3c80a98fd0724c50967f39eed912e8c3b62cc52bed54eea4c135e30f8704" Jan 29 07:03:25 crc kubenswrapper[4861]: I0129 07:03:25.116605 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:03:25 crc kubenswrapper[4861]: E0129 07:03:25.117098 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:03:40 crc kubenswrapper[4861]: I0129 07:03:40.116001 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:03:40 crc kubenswrapper[4861]: E0129 07:03:40.116644 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:03:54 crc kubenswrapper[4861]: I0129 07:03:54.116343 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:03:54 crc kubenswrapper[4861]: E0129 07:03:54.117217 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:04:06 crc kubenswrapper[4861]: I0129 07:04:06.116605 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:04:06 crc kubenswrapper[4861]: E0129 07:04:06.117696 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:04:17 crc kubenswrapper[4861]: I0129 07:04:17.117268 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:04:17 crc kubenswrapper[4861]: E0129 07:04:17.118508 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:04:23 crc kubenswrapper[4861]: I0129 07:04:23.792337 4861 scope.go:117] "RemoveContainer" containerID="6da086fcc4b00f89b80a9fff03917faac0c0299e635e3b23448c43c2f19c72ad" Jan 29 07:04:28 crc kubenswrapper[4861]: I0129 07:04:28.116838 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:04:28 crc kubenswrapper[4861]: E0129 07:04:28.117349 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:04:43 crc kubenswrapper[4861]: I0129 07:04:43.116402 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:04:43 crc kubenswrapper[4861]: E0129 07:04:43.117044 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:04:58 crc kubenswrapper[4861]: I0129 07:04:58.117209 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:04:58 crc kubenswrapper[4861]: E0129 07:04:58.118494 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:05:10 crc kubenswrapper[4861]: I0129 07:05:10.116050 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:05:10 crc kubenswrapper[4861]: E0129 07:05:10.116943 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:05:22 crc kubenswrapper[4861]: I0129 07:05:22.129619 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:05:22 crc kubenswrapper[4861]: E0129 07:05:22.131231 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:05:34 crc kubenswrapper[4861]: I0129 07:05:34.116407 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a" Jan 29 07:05:34 crc kubenswrapper[4861]: I0129 07:05:34.928046 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"468974c50da9310169ee70656db17c4be121d549832a7ebe2f714fecd2a6908b"} Jan 29 07:08:00 crc kubenswrapper[4861]: I0129 07:08:00.629835 4861 patch_prober.go:28] interesting 
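The probe failures throughout this log come from an HTTP GET against the container's health endpoint; a transport error such as "connect: connection refused" (nothing listening on 127.0.0.1:8798) counts as a failed probe, and after enough consecutive failures the container is restarted, as happened above. A sketch of one HTTP probe attempt; kubelet treats 2xx/3xx responses as success:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probeHTTP performs one liveness check: any transport error (e.g.
    // "connect: connection refused") or non-2xx/3xx status is a failure.
    func probeHTTP(url string, timeout time.Duration) error {
        client := &http.Client{Timeout: timeout}
        resp, err := client.Get(url)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("unexpected status %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        if err := probeHTTP("http://127.0.0.1:8798/health", time.Second); err != nil {
            fmt.Println("Probe failed:", err) // mirrors the prober.go records above
        }
    }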
Jan 29 07:08:00 crc kubenswrapper[4861]: I0129 07:08:00.629835 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:08:00 crc kubenswrapper[4861]: I0129 07:08:00.631244 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:08:30 crc kubenswrapper[4861]: I0129 07:08:30.629559 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:08:30 crc kubenswrapper[4861]: I0129 07:08:30.630267 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.531796 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zkbhd"]
Jan 29 07:08:47 crc kubenswrapper[4861]: E0129 07:08:47.532736 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9403f2a5-4496-4a55-b02d-e439af3c7428" containerName="extract-content"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.532756 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9403f2a5-4496-4a55-b02d-e439af3c7428" containerName="extract-content"
Jan 29 07:08:47 crc kubenswrapper[4861]: E0129 07:08:47.532784 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9403f2a5-4496-4a55-b02d-e439af3c7428" containerName="registry-server"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.532792 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9403f2a5-4496-4a55-b02d-e439af3c7428" containerName="registry-server"
Jan 29 07:08:47 crc kubenswrapper[4861]: E0129 07:08:47.532807 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9403f2a5-4496-4a55-b02d-e439af3c7428" containerName="extract-utilities"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.532816 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9403f2a5-4496-4a55-b02d-e439af3c7428" containerName="extract-utilities"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.532998 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9403f2a5-4496-4a55-b02d-e439af3c7428" containerName="registry-server"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.539716 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.553805 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zkbhd"]
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.689823 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-catalog-content\") pod \"redhat-operators-zkbhd\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") " pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.689893 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-utilities\") pod \"redhat-operators-zkbhd\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") " pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.689936 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lnlf\" (UniqueName: \"kubernetes.io/projected/14a3f780-17e6-4f07-9785-5e6f7934e90d-kube-api-access-5lnlf\") pod \"redhat-operators-zkbhd\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") " pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.791807 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-catalog-content\") pod \"redhat-operators-zkbhd\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") " pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.791906 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-utilities\") pod \"redhat-operators-zkbhd\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") " pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.791939 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lnlf\" (UniqueName: \"kubernetes.io/projected/14a3f780-17e6-4f07-9785-5e6f7934e90d-kube-api-access-5lnlf\") pod \"redhat-operators-zkbhd\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") " pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.792499 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-catalog-content\") pod \"redhat-operators-zkbhd\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") " pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.792536 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-utilities\") pod \"redhat-operators-zkbhd\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") " pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.819473 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lnlf\" (UniqueName: \"kubernetes.io/projected/14a3f780-17e6-4f07-9785-5e6f7934e90d-kube-api-access-5lnlf\") pod \"redhat-operators-zkbhd\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") " pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:08:47 crc kubenswrapper[4861]: I0129 07:08:47.864561 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:08:48 crc kubenswrapper[4861]: I0129 07:08:48.272816 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zkbhd"]
Jan 29 07:08:48 crc kubenswrapper[4861]: I0129 07:08:48.597374 4861 generic.go:334] "Generic (PLEG): container finished" podID="14a3f780-17e6-4f07-9785-5e6f7934e90d" containerID="f54083d396008ca16db638dbef5c925b49a3194aaf8e2c41426b6ba82276c732" exitCode=0
Jan 29 07:08:48 crc kubenswrapper[4861]: I0129 07:08:48.599115 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zkbhd" event={"ID":"14a3f780-17e6-4f07-9785-5e6f7934e90d","Type":"ContainerDied","Data":"f54083d396008ca16db638dbef5c925b49a3194aaf8e2c41426b6ba82276c732"}
Jan 29 07:08:48 crc kubenswrapper[4861]: I0129 07:08:48.599237 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zkbhd" event={"ID":"14a3f780-17e6-4f07-9785-5e6f7934e90d","Type":"ContainerStarted","Data":"99e590be98dba1b77f27bc163b0247ab97f7b515ed42d949caf1e71b77b17e6b"}
Jan 29 07:08:48 crc kubenswrapper[4861]: I0129 07:08:48.599377 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 07:08:50 crc kubenswrapper[4861]: I0129 07:08:50.616897 4861 generic.go:334] "Generic (PLEG): container finished" podID="14a3f780-17e6-4f07-9785-5e6f7934e90d" containerID="74a0df8a72d0798074bb6ee36f5b67f5cc2f4831ebf06b4045ed942530e9806b" exitCode=0
Jan 29 07:08:50 crc kubenswrapper[4861]: I0129 07:08:50.616935 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zkbhd" event={"ID":"14a3f780-17e6-4f07-9785-5e6f7934e90d","Type":"ContainerDied","Data":"74a0df8a72d0798074bb6ee36f5b67f5cc2f4831ebf06b4045ed942530e9806b"}
Jan 29 07:08:51 crc kubenswrapper[4861]: I0129 07:08:51.625991 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zkbhd" event={"ID":"14a3f780-17e6-4f07-9785-5e6f7934e90d","Type":"ContainerStarted","Data":"937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be"}
Jan 29 07:08:51 crc kubenswrapper[4861]: I0129 07:08:51.650233 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zkbhd" podStartSLOduration=2.210837628 podStartE2EDuration="4.650215226s" podCreationTimestamp="2026-01-29 07:08:47 +0000 UTC" firstStartedPulling="2026-01-29 07:08:48.599166602 +0000 UTC m=+2020.270661159" lastFinishedPulling="2026-01-29 07:08:51.03854419 +0000 UTC m=+2022.710038757" observedRunningTime="2026-01-29 07:08:51.647832963 +0000 UTC m=+2023.319327530" watchObservedRunningTime="2026-01-29 07:08:51.650215226 +0000 UTC m=+2023.321709783"
Jan 29 07:08:57 crc kubenswrapper[4861]: I0129 07:08:57.865005 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zkbhd"
pod="openshift-marketplace/redhat-operators-zkbhd" Jan 29 07:08:57 crc kubenswrapper[4861]: I0129 07:08:57.914128 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zkbhd" Jan 29 07:08:58 crc kubenswrapper[4861]: I0129 07:08:58.727634 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zkbhd" Jan 29 07:08:58 crc kubenswrapper[4861]: I0129 07:08:58.776329 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zkbhd"] Jan 29 07:09:00 crc kubenswrapper[4861]: I0129 07:09:00.629877 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:09:00 crc kubenswrapper[4861]: I0129 07:09:00.629971 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:09:00 crc kubenswrapper[4861]: I0129 07:09:00.630036 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 07:09:00 crc kubenswrapper[4861]: I0129 07:09:00.630974 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"468974c50da9310169ee70656db17c4be121d549832a7ebe2f714fecd2a6908b"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 07:09:00 crc kubenswrapper[4861]: I0129 07:09:00.631109 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://468974c50da9310169ee70656db17c4be121d549832a7ebe2f714fecd2a6908b" gracePeriod=600 Jan 29 07:09:00 crc kubenswrapper[4861]: I0129 07:09:00.700221 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zkbhd" podUID="14a3f780-17e6-4f07-9785-5e6f7934e90d" containerName="registry-server" containerID="cri-o://937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be" gracePeriod=2 Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.692285 4861 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.692285 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.709734 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="468974c50da9310169ee70656db17c4be121d549832a7ebe2f714fecd2a6908b" exitCode=0
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.709862 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"468974c50da9310169ee70656db17c4be121d549832a7ebe2f714fecd2a6908b"}
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.710515 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"}
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.710649 4861 scope.go:117] "RemoveContainer" containerID="33f9430e25cbfd0e9f9460b9323c59687ddb075da70d1174dc392eca7e72d93a"
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.714635 4861 generic.go:334] "Generic (PLEG): container finished" podID="14a3f780-17e6-4f07-9785-5e6f7934e90d" containerID="937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be" exitCode=0
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.714680 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zkbhd" event={"ID":"14a3f780-17e6-4f07-9785-5e6f7934e90d","Type":"ContainerDied","Data":"937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be"}
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.714709 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zkbhd" event={"ID":"14a3f780-17e6-4f07-9785-5e6f7934e90d","Type":"ContainerDied","Data":"99e590be98dba1b77f27bc163b0247ab97f7b515ed42d949caf1e71b77b17e6b"}
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.714778 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zkbhd"
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.745969 4861 scope.go:117] "RemoveContainer" containerID="937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be"
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.777410 4861 scope.go:117] "RemoveContainer" containerID="74a0df8a72d0798074bb6ee36f5b67f5cc2f4831ebf06b4045ed942530e9806b"
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.804757 4861 scope.go:117] "RemoveContainer" containerID="f54083d396008ca16db638dbef5c925b49a3194aaf8e2c41426b6ba82276c732"
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.807250 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5lnlf\" (UniqueName: \"kubernetes.io/projected/14a3f780-17e6-4f07-9785-5e6f7934e90d-kube-api-access-5lnlf\") pod \"14a3f780-17e6-4f07-9785-5e6f7934e90d\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") "
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.807313 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-utilities\") pod \"14a3f780-17e6-4f07-9785-5e6f7934e90d\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") "
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.807357 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-catalog-content\") pod \"14a3f780-17e6-4f07-9785-5e6f7934e90d\" (UID: \"14a3f780-17e6-4f07-9785-5e6f7934e90d\") "
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.808564 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-utilities" (OuterVolumeSpecName: "utilities") pod "14a3f780-17e6-4f07-9785-5e6f7934e90d" (UID: "14a3f780-17e6-4f07-9785-5e6f7934e90d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.808879 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.831279 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14a3f780-17e6-4f07-9785-5e6f7934e90d-kube-api-access-5lnlf" (OuterVolumeSpecName: "kube-api-access-5lnlf") pod "14a3f780-17e6-4f07-9785-5e6f7934e90d" (UID: "14a3f780-17e6-4f07-9785-5e6f7934e90d"). InnerVolumeSpecName "kube-api-access-5lnlf". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.850388 4861 scope.go:117] "RemoveContainer" containerID="937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be" Jan 29 07:09:01 crc kubenswrapper[4861]: E0129 07:09:01.851316 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be\": container with ID starting with 937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be not found: ID does not exist" containerID="937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be" Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.851388 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be"} err="failed to get container status \"937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be\": rpc error: code = NotFound desc = could not find container \"937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be\": container with ID starting with 937739abc855312c93ab8464e91edf3e3c11ee66cd69e2c0197486cc426180be not found: ID does not exist" Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.851427 4861 scope.go:117] "RemoveContainer" containerID="74a0df8a72d0798074bb6ee36f5b67f5cc2f4831ebf06b4045ed942530e9806b" Jan 29 07:09:01 crc kubenswrapper[4861]: E0129 07:09:01.851740 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74a0df8a72d0798074bb6ee36f5b67f5cc2f4831ebf06b4045ed942530e9806b\": container with ID starting with 74a0df8a72d0798074bb6ee36f5b67f5cc2f4831ebf06b4045ed942530e9806b not found: ID does not exist" containerID="74a0df8a72d0798074bb6ee36f5b67f5cc2f4831ebf06b4045ed942530e9806b" Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.851764 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74a0df8a72d0798074bb6ee36f5b67f5cc2f4831ebf06b4045ed942530e9806b"} err="failed to get container status \"74a0df8a72d0798074bb6ee36f5b67f5cc2f4831ebf06b4045ed942530e9806b\": rpc error: code = NotFound desc = could not find container \"74a0df8a72d0798074bb6ee36f5b67f5cc2f4831ebf06b4045ed942530e9806b\": container with ID starting with 74a0df8a72d0798074bb6ee36f5b67f5cc2f4831ebf06b4045ed942530e9806b not found: ID does not exist" Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.851777 4861 scope.go:117] "RemoveContainer" containerID="f54083d396008ca16db638dbef5c925b49a3194aaf8e2c41426b6ba82276c732" Jan 29 07:09:01 crc kubenswrapper[4861]: E0129 07:09:01.852044 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f54083d396008ca16db638dbef5c925b49a3194aaf8e2c41426b6ba82276c732\": container with ID starting with f54083d396008ca16db638dbef5c925b49a3194aaf8e2c41426b6ba82276c732 not found: ID does not exist" containerID="f54083d396008ca16db638dbef5c925b49a3194aaf8e2c41426b6ba82276c732" Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.852088 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f54083d396008ca16db638dbef5c925b49a3194aaf8e2c41426b6ba82276c732"} err="failed to get container status \"f54083d396008ca16db638dbef5c925b49a3194aaf8e2c41426b6ba82276c732\": rpc error: code = NotFound desc = could not 
find container \"f54083d396008ca16db638dbef5c925b49a3194aaf8e2c41426b6ba82276c732\": container with ID starting with f54083d396008ca16db638dbef5c925b49a3194aaf8e2c41426b6ba82276c732 not found: ID does not exist" Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.911317 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5lnlf\" (UniqueName: \"kubernetes.io/projected/14a3f780-17e6-4f07-9785-5e6f7934e90d-kube-api-access-5lnlf\") on node \"crc\" DevicePath \"\"" Jan 29 07:09:01 crc kubenswrapper[4861]: I0129 07:09:01.967119 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "14a3f780-17e6-4f07-9785-5e6f7934e90d" (UID: "14a3f780-17e6-4f07-9785-5e6f7934e90d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:09:02 crc kubenswrapper[4861]: I0129 07:09:02.012328 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14a3f780-17e6-4f07-9785-5e6f7934e90d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:09:02 crc kubenswrapper[4861]: I0129 07:09:02.053319 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zkbhd"] Jan 29 07:09:02 crc kubenswrapper[4861]: I0129 07:09:02.061654 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zkbhd"] Jan 29 07:09:03 crc kubenswrapper[4861]: I0129 07:09:03.130945 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14a3f780-17e6-4f07-9785-5e6f7934e90d" path="/var/lib/kubelet/pods/14a3f780-17e6-4f07-9785-5e6f7934e90d/volumes" Jan 29 07:10:36 crc kubenswrapper[4861]: I0129 07:10:36.777986 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xds5n"] Jan 29 07:10:36 crc kubenswrapper[4861]: E0129 07:10:36.779167 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14a3f780-17e6-4f07-9785-5e6f7934e90d" containerName="registry-server" Jan 29 07:10:36 crc kubenswrapper[4861]: I0129 07:10:36.779193 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="14a3f780-17e6-4f07-9785-5e6f7934e90d" containerName="registry-server" Jan 29 07:10:36 crc kubenswrapper[4861]: E0129 07:10:36.779216 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14a3f780-17e6-4f07-9785-5e6f7934e90d" containerName="extract-utilities" Jan 29 07:10:36 crc kubenswrapper[4861]: I0129 07:10:36.779228 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="14a3f780-17e6-4f07-9785-5e6f7934e90d" containerName="extract-utilities" Jan 29 07:10:36 crc kubenswrapper[4861]: E0129 07:10:36.779246 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14a3f780-17e6-4f07-9785-5e6f7934e90d" containerName="extract-content" Jan 29 07:10:36 crc kubenswrapper[4861]: I0129 07:10:36.779257 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="14a3f780-17e6-4f07-9785-5e6f7934e90d" containerName="extract-content" Jan 29 07:10:36 crc kubenswrapper[4861]: I0129 07:10:36.779463 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="14a3f780-17e6-4f07-9785-5e6f7934e90d" containerName="registry-server" Jan 29 07:10:36 crc kubenswrapper[4861]: I0129 07:10:36.780715 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xds5n" Jan 29 07:10:36 crc kubenswrapper[4861]: I0129 07:10:36.810434 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xds5n"] Jan 29 07:10:36 crc kubenswrapper[4861]: I0129 07:10:36.903713 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-catalog-content\") pod \"redhat-marketplace-xds5n\" (UID: \"ef9c035d-1064-498a-a95f-98c935ee0bac\") " pod="openshift-marketplace/redhat-marketplace-xds5n" Jan 29 07:10:36 crc kubenswrapper[4861]: I0129 07:10:36.903804 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4cld\" (UniqueName: \"kubernetes.io/projected/ef9c035d-1064-498a-a95f-98c935ee0bac-kube-api-access-s4cld\") pod \"redhat-marketplace-xds5n\" (UID: \"ef9c035d-1064-498a-a95f-98c935ee0bac\") " pod="openshift-marketplace/redhat-marketplace-xds5n" Jan 29 07:10:36 crc kubenswrapper[4861]: I0129 07:10:36.903852 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-utilities\") pod \"redhat-marketplace-xds5n\" (UID: \"ef9c035d-1064-498a-a95f-98c935ee0bac\") " pod="openshift-marketplace/redhat-marketplace-xds5n" Jan 29 07:10:37 crc kubenswrapper[4861]: I0129 07:10:37.005142 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4cld\" (UniqueName: \"kubernetes.io/projected/ef9c035d-1064-498a-a95f-98c935ee0bac-kube-api-access-s4cld\") pod \"redhat-marketplace-xds5n\" (UID: \"ef9c035d-1064-498a-a95f-98c935ee0bac\") " pod="openshift-marketplace/redhat-marketplace-xds5n" Jan 29 07:10:37 crc kubenswrapper[4861]: I0129 07:10:37.005218 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-utilities\") pod \"redhat-marketplace-xds5n\" (UID: \"ef9c035d-1064-498a-a95f-98c935ee0bac\") " pod="openshift-marketplace/redhat-marketplace-xds5n" Jan 29 07:10:37 crc kubenswrapper[4861]: I0129 07:10:37.005262 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-catalog-content\") pod \"redhat-marketplace-xds5n\" (UID: \"ef9c035d-1064-498a-a95f-98c935ee0bac\") " pod="openshift-marketplace/redhat-marketplace-xds5n" Jan 29 07:10:37 crc kubenswrapper[4861]: I0129 07:10:37.005742 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-catalog-content\") pod \"redhat-marketplace-xds5n\" (UID: \"ef9c035d-1064-498a-a95f-98c935ee0bac\") " pod="openshift-marketplace/redhat-marketplace-xds5n" Jan 29 07:10:37 crc kubenswrapper[4861]: I0129 07:10:37.005858 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-utilities\") pod \"redhat-marketplace-xds5n\" (UID: \"ef9c035d-1064-498a-a95f-98c935ee0bac\") " pod="openshift-marketplace/redhat-marketplace-xds5n" Jan 29 07:10:37 crc kubenswrapper[4861]: I0129 07:10:37.027092 4861 operation_generator.go:637] "MountVolume.SetUp 
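The reconciler entries above show the kubelet's two-phase volume handling: VerifyControllerAttachedVolume confirms the volume is attached (or needs no attach), then MountVolume.SetUp mounts it into the pod. A toy desired-state/actual-state reconcile loop in Go; the types and function names here are illustrative, not the kubelet's own:

package volumes

// state maps a volume name to whether it is currently mounted.
// A minimal sketch of the desired/actual reconcile pattern reflected
// in the reconciler_common.go entries above.
type state map[string]bool

func reconcile(desired, actual state, mount, unmount func(string) error) error {
	for v := range desired {
		if !actual[v] {
			if err := mount(v); err != nil { // MountVolume.SetUp analogue
				return err
			}
			actual[v] = true
		}
	}
	for v := range actual {
		if !desired[v] {
			if err := unmount(v); err != nil { // UnmountVolume.TearDown analogue
				return err
			}
			delete(actual, v)
		}
	}
	return nil
}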
Jan 29 07:10:37 crc kubenswrapper[4861]: I0129 07:10:37.114784 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xds5n"
Jan 29 07:10:37 crc kubenswrapper[4861]: I0129 07:10:37.654099 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xds5n"]
Jan 29 07:10:38 crc kubenswrapper[4861]: E0129 07:10:38.080234 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef9c035d_1064_498a_a95f_98c935ee0bac.slice/crio-0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef9c035d_1064_498a_a95f_98c935ee0bac.slice/crio-conmon-0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f.scope\": RecentStats: unable to find data in memory cache]"
Jan 29 07:10:38 crc kubenswrapper[4861]: I0129 07:10:38.510680 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xds5n" event={"ID":"ef9c035d-1064-498a-a95f-98c935ee0bac","Type":"ContainerDied","Data":"0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f"}
Jan 29 07:10:38 crc kubenswrapper[4861]: I0129 07:10:38.510607 4861 generic.go:334] "Generic (PLEG): container finished" podID="ef9c035d-1064-498a-a95f-98c935ee0bac" containerID="0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f" exitCode=0
Jan 29 07:10:38 crc kubenswrapper[4861]: I0129 07:10:38.511329 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xds5n" event={"ID":"ef9c035d-1064-498a-a95f-98c935ee0bac","Type":"ContainerStarted","Data":"ff4f7647f4acb4b4ced02d4bd780b73a5456bf19c6840fe7a9acd9e75400d3e0"}
Jan 29 07:10:39 crc kubenswrapper[4861]: I0129 07:10:39.520194 4861 generic.go:334] "Generic (PLEG): container finished" podID="ef9c035d-1064-498a-a95f-98c935ee0bac" containerID="4496399958f5a0575cce7b30fa4e7c5378c325005cb0ec8254034ec374a91eb8" exitCode=0
Jan 29 07:10:39 crc kubenswrapper[4861]: I0129 07:10:39.520306 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xds5n" event={"ID":"ef9c035d-1064-498a-a95f-98c935ee0bac","Type":"ContainerDied","Data":"4496399958f5a0575cce7b30fa4e7c5378c325005cb0ec8254034ec374a91eb8"}
Jan 29 07:10:40 crc kubenswrapper[4861]: I0129 07:10:40.531427 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xds5n" event={"ID":"ef9c035d-1064-498a-a95f-98c935ee0bac","Type":"ContainerStarted","Data":"dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae"}
Jan 29 07:10:40 crc kubenswrapper[4861]: I0129 07:10:40.549900 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xds5n" podStartSLOduration=3.106535268 podStartE2EDuration="4.549879815s" podCreationTimestamp="2026-01-29 07:10:36 +0000 UTC" firstStartedPulling="2026-01-29 07:10:38.512415899 +0000 UTC m=+2130.183910456" lastFinishedPulling="2026-01-29 07:10:39.955760426 +0000 UTC m=+2131.627255003" observedRunningTime="2026-01-29 07:10:40.548420887 +0000 UTC m=+2132.219915454" watchObservedRunningTime="2026-01-29 07:10:40.549879815 +0000 UTC m=+2132.221374382"
Jan 29 07:10:47 crc kubenswrapper[4861]: I0129 07:10:47.115166 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xds5n"
Jan 29 07:10:47 crc kubenswrapper[4861]: I0129 07:10:47.133045 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xds5n"
Jan 29 07:10:47 crc kubenswrapper[4861]: I0129 07:10:47.187524 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xds5n"
Jan 29 07:10:47 crc kubenswrapper[4861]: I0129 07:10:47.627923 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xds5n"
Jan 29 07:10:47 crc kubenswrapper[4861]: I0129 07:10:47.671564 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xds5n"]
Jan 29 07:10:49 crc kubenswrapper[4861]: I0129 07:10:49.615547 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xds5n" podUID="ef9c035d-1064-498a-a95f-98c935ee0bac" containerName="registry-server" containerID="cri-o://dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae" gracePeriod=2
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.152710 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xds5n"
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.195625 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-utilities\") pod \"ef9c035d-1064-498a-a95f-98c935ee0bac\" (UID: \"ef9c035d-1064-498a-a95f-98c935ee0bac\") "
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.195714 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-catalog-content\") pod \"ef9c035d-1064-498a-a95f-98c935ee0bac\" (UID: \"ef9c035d-1064-498a-a95f-98c935ee0bac\") "
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.195857 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4cld\" (UniqueName: \"kubernetes.io/projected/ef9c035d-1064-498a-a95f-98c935ee0bac-kube-api-access-s4cld\") pod \"ef9c035d-1064-498a-a95f-98c935ee0bac\" (UID: \"ef9c035d-1064-498a-a95f-98c935ee0bac\") "
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.198041 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-utilities" (OuterVolumeSpecName: "utilities") pod "ef9c035d-1064-498a-a95f-98c935ee0bac" (UID: "ef9c035d-1064-498a-a95f-98c935ee0bac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.211258 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef9c035d-1064-498a-a95f-98c935ee0bac-kube-api-access-s4cld" (OuterVolumeSpecName: "kube-api-access-s4cld") pod "ef9c035d-1064-498a-a95f-98c935ee0bac" (UID: "ef9c035d-1064-498a-a95f-98c935ee0bac"). InnerVolumeSpecName "kube-api-access-s4cld". PluginName "kubernetes.io/projected", VolumeGidValue ""
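The gracePeriod=2 above is the pod's termination grace period in seconds: the runtime delivers SIGTERM, waits out the grace period, then force-kills. A process-level sketch of that sequence; the kubelet actually drives this through the CRI StopContainer call rather than raw signals, so this is only an analogy:

package kill

import (
	"os"
	"syscall"
	"time"
)

// killWithGrace sketches the TERM-then-KILL sequence behind
// "Killing container with a grace period". exited should be closed
// by the caller once the process has exited on its own.
func killWithGrace(p *os.Process, grace time.Duration, exited <-chan struct{}) error {
	if err := p.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	select {
	case <-exited: // clean shutdown within the grace period
		return nil
	case <-time.After(grace): // e.g. 2s for the registry-server above
		return p.Kill()
	}
}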
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.236808 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ef9c035d-1064-498a-a95f-98c935ee0bac" (UID: "ef9c035d-1064-498a-a95f-98c935ee0bac"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.297956 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4cld\" (UniqueName: \"kubernetes.io/projected/ef9c035d-1064-498a-a95f-98c935ee0bac-kube-api-access-s4cld\") on node \"crc\" DevicePath \"\""
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.297993 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.298009 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef9c035d-1064-498a-a95f-98c935ee0bac-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.624736 4861 generic.go:334] "Generic (PLEG): container finished" podID="ef9c035d-1064-498a-a95f-98c935ee0bac" containerID="dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae" exitCode=0
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.624777 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xds5n" event={"ID":"ef9c035d-1064-498a-a95f-98c935ee0bac","Type":"ContainerDied","Data":"dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae"}
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.624794 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xds5n"
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.624813 4861 scope.go:117] "RemoveContainer" containerID="dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae"
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.624802 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xds5n" event={"ID":"ef9c035d-1064-498a-a95f-98c935ee0bac","Type":"ContainerDied","Data":"ff4f7647f4acb4b4ced02d4bd780b73a5456bf19c6840fe7a9acd9e75400d3e0"}
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.642700 4861 scope.go:117] "RemoveContainer" containerID="4496399958f5a0575cce7b30fa4e7c5378c325005cb0ec8254034ec374a91eb8"
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.659605 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xds5n"]
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.667239 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xds5n"]
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.687902 4861 scope.go:117] "RemoveContainer" containerID="0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f"
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.703560 4861 scope.go:117] "RemoveContainer" containerID="dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae"
Jan 29 07:10:50 crc kubenswrapper[4861]: E0129 07:10:50.704299 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae\": container with ID starting with dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae not found: ID does not exist" containerID="dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae"
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.704343 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae"} err="failed to get container status \"dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae\": rpc error: code = NotFound desc = could not find container \"dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae\": container with ID starting with dda4214188fa95e3e752c485ec47ddad46496a6b3df6eea6650502929e758dae not found: ID does not exist"
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.704366 4861 scope.go:117] "RemoveContainer" containerID="4496399958f5a0575cce7b30fa4e7c5378c325005cb0ec8254034ec374a91eb8"
Jan 29 07:10:50 crc kubenswrapper[4861]: E0129 07:10:50.704689 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4496399958f5a0575cce7b30fa4e7c5378c325005cb0ec8254034ec374a91eb8\": container with ID starting with 4496399958f5a0575cce7b30fa4e7c5378c325005cb0ec8254034ec374a91eb8 not found: ID does not exist" containerID="4496399958f5a0575cce7b30fa4e7c5378c325005cb0ec8254034ec374a91eb8"
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.704736 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4496399958f5a0575cce7b30fa4e7c5378c325005cb0ec8254034ec374a91eb8"} err="failed to get container status \"4496399958f5a0575cce7b30fa4e7c5378c325005cb0ec8254034ec374a91eb8\": rpc error: code = NotFound desc = could not find container \"4496399958f5a0575cce7b30fa4e7c5378c325005cb0ec8254034ec374a91eb8\": container with ID starting with 4496399958f5a0575cce7b30fa4e7c5378c325005cb0ec8254034ec374a91eb8 not found: ID does not exist"
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.704766 4861 scope.go:117] "RemoveContainer" containerID="0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f"
Jan 29 07:10:50 crc kubenswrapper[4861]: E0129 07:10:50.705021 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f\": container with ID starting with 0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f not found: ID does not exist" containerID="0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f"
Jan 29 07:10:50 crc kubenswrapper[4861]: I0129 07:10:50.705053 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f"} err="failed to get container status \"0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f\": rpc error: code = NotFound desc = could not find container \"0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f\": container with ID starting with 0e123a0446e4c00d41bff42fb7bfa7bf5b1476167b3ae43982ff1e4711a3089f not found: ID does not exist"
Jan 29 07:10:51 crc kubenswrapper[4861]: I0129 07:10:51.125936 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef9c035d-1064-498a-a95f-98c935ee0bac" path="/var/lib/kubelet/pods/ef9c035d-1064-498a-a95f-98c935ee0bac/volumes"
Jan 29 07:11:30 crc kubenswrapper[4861]: I0129 07:11:30.630486 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:11:30 crc kubenswrapper[4861]: I0129 07:11:30.631055 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:12:00 crc kubenswrapper[4861]: I0129 07:12:00.629569 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:12:00 crc kubenswrapper[4861]: I0129 07:12:00.630293 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:12:30 crc kubenswrapper[4861]: I0129 07:12:30.629733 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:12:30 crc kubenswrapper[4861]: I0129 07:12:30.630253 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:12:30 crc kubenswrapper[4861]: I0129 07:12:30.630312 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 07:12:30 crc kubenswrapper[4861]: I0129 07:12:30.631057 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 07:12:30 crc kubenswrapper[4861]: I0129 07:12:30.631151 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" gracePeriod=600
Jan 29 07:12:30 crc kubenswrapper[4861]: E0129 07:12:30.752680 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:12:31 crc kubenswrapper[4861]: I0129 07:12:31.436731 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" exitCode=0
Jan 29 07:12:31 crc kubenswrapper[4861]: I0129 07:12:31.436774 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"}
Jan 29 07:12:31 crc kubenswrapper[4861]: I0129 07:12:31.436805 4861 scope.go:117] "RemoveContainer" containerID="468974c50da9310169ee70656db17c4be121d549832a7ebe2f714fecd2a6908b"
Jan 29 07:12:31 crc kubenswrapper[4861]: I0129 07:12:31.437317 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:12:31 crc kubenswrapper[4861]: E0129 07:12:31.437561 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:12:44 crc kubenswrapper[4861]: I0129 07:12:44.116918 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:12:44 crc kubenswrapper[4861]: E0129 07:12:44.118012 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:12:58 crc kubenswrapper[4861]: I0129 07:12:58.116008 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:12:58 crc kubenswrapper[4861]: E0129 07:12:58.116705 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:13:10 crc kubenswrapper[4861]: I0129 07:13:10.116900 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:13:10 crc kubenswrapper[4861]: E0129 07:13:10.118108 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:13:25 crc kubenswrapper[4861]: I0129 07:13:25.116988 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:13:25 crc kubenswrapper[4861]: E0129 07:13:25.117776 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:13:36 crc kubenswrapper[4861]: I0129 07:13:36.116604 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:13:36 crc kubenswrapper[4861]: E0129 07:13:36.119817 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:13:48 crc kubenswrapper[4861]: I0129 07:13:48.115905 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:13:48 crc kubenswrapper[4861]: E0129 07:13:48.116663 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:14:01 crc kubenswrapper[4861]: I0129 07:14:01.116710 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:14:01 crc kubenswrapper[4861]: E0129 07:14:01.117638 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:14:15 crc kubenswrapper[4861]: I0129 07:14:15.117361 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:14:15 crc kubenswrapper[4861]: E0129 07:14:15.118075 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:14:27 crc kubenswrapper[4861]: I0129 07:14:27.116822 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:14:27 crc kubenswrapper[4861]: E0129 07:14:27.117787 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:14:41 crc kubenswrapper[4861]: I0129 07:14:41.116345 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:14:41 crc kubenswrapper[4861]: E0129 07:14:41.117112 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:14:53 crc kubenswrapper[4861]: I0129 07:14:53.116690 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:14:53 crc kubenswrapper[4861]: E0129 07:14:53.117586 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.161214 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"] Jan 29 07:15:00 crc kubenswrapper[4861]: E0129 07:15:00.161845 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef9c035d-1064-498a-a95f-98c935ee0bac" containerName="extract-content" Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.161859 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef9c035d-1064-498a-a95f-98c935ee0bac" containerName="extract-content" Jan 29 07:15:00 crc kubenswrapper[4861]: E0129 07:15:00.161896 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef9c035d-1064-498a-a95f-98c935ee0bac" containerName="registry-server" Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.161904 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef9c035d-1064-498a-a95f-98c935ee0bac" containerName="registry-server" Jan 29 07:15:00 crc kubenswrapper[4861]: E0129 07:15:00.161918 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef9c035d-1064-498a-a95f-98c935ee0bac" containerName="extract-utilities" Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.161927 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef9c035d-1064-498a-a95f-98c935ee0bac" containerName="extract-utilities" Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.162135 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef9c035d-1064-498a-a95f-98c935ee0bac" containerName="registry-server" Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.162679 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.165513 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.166661 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.179231 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"]
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.323042 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wcxb\" (UniqueName: \"kubernetes.io/projected/0caf9398-8b50-416e-ac1a-714fa4569e76-kube-api-access-8wcxb\") pod \"collect-profiles-29494515-wz747\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.323103 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0caf9398-8b50-416e-ac1a-714fa4569e76-config-volume\") pod \"collect-profiles-29494515-wz747\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.323125 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0caf9398-8b50-416e-ac1a-714fa4569e76-secret-volume\") pod \"collect-profiles-29494515-wz747\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.425548 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wcxb\" (UniqueName: \"kubernetes.io/projected/0caf9398-8b50-416e-ac1a-714fa4569e76-kube-api-access-8wcxb\") pod \"collect-profiles-29494515-wz747\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.425600 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0caf9398-8b50-416e-ac1a-714fa4569e76-config-volume\") pod \"collect-profiles-29494515-wz747\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.425628 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0caf9398-8b50-416e-ac1a-714fa4569e76-secret-volume\") pod \"collect-profiles-29494515-wz747\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.426569 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0caf9398-8b50-416e-ac1a-714fa4569e76-config-volume\") pod \"collect-profiles-29494515-wz747\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.432668 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0caf9398-8b50-416e-ac1a-714fa4569e76-secret-volume\") pod \"collect-profiles-29494515-wz747\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.450680 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wcxb\" (UniqueName: \"kubernetes.io/projected/0caf9398-8b50-416e-ac1a-714fa4569e76-kube-api-access-8wcxb\") pod \"collect-profiles-29494515-wz747\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.497054 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:00 crc kubenswrapper[4861]: I0129 07:15:00.907444 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"]
Jan 29 07:15:01 crc kubenswrapper[4861]: I0129 07:15:01.787253 4861 generic.go:334] "Generic (PLEG): container finished" podID="0caf9398-8b50-416e-ac1a-714fa4569e76" containerID="4c77653c8ccd0ec1923e5b5c13ac30c2439488d58eefee975e91f8e373121b87" exitCode=0
Jan 29 07:15:01 crc kubenswrapper[4861]: I0129 07:15:01.787308 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747" event={"ID":"0caf9398-8b50-416e-ac1a-714fa4569e76","Type":"ContainerDied","Data":"4c77653c8ccd0ec1923e5b5c13ac30c2439488d58eefee975e91f8e373121b87"}
Jan 29 07:15:01 crc kubenswrapper[4861]: I0129 07:15:01.787540 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747" event={"ID":"0caf9398-8b50-416e-ac1a-714fa4569e76","Type":"ContainerStarted","Data":"f20a77e2b7169d5265d79abbbbeb7d91bcec9d150a12cf949550b98c38e21cf3"}
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.082479 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.162520 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0caf9398-8b50-416e-ac1a-714fa4569e76-config-volume\") pod \"0caf9398-8b50-416e-ac1a-714fa4569e76\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") "
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.162572 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0caf9398-8b50-416e-ac1a-714fa4569e76-secret-volume\") pod \"0caf9398-8b50-416e-ac1a-714fa4569e76\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") "
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.162698 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8wcxb\" (UniqueName: \"kubernetes.io/projected/0caf9398-8b50-416e-ac1a-714fa4569e76-kube-api-access-8wcxb\") pod \"0caf9398-8b50-416e-ac1a-714fa4569e76\" (UID: \"0caf9398-8b50-416e-ac1a-714fa4569e76\") "
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.163466 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0caf9398-8b50-416e-ac1a-714fa4569e76-config-volume" (OuterVolumeSpecName: "config-volume") pod "0caf9398-8b50-416e-ac1a-714fa4569e76" (UID: "0caf9398-8b50-416e-ac1a-714fa4569e76"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.168056 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0caf9398-8b50-416e-ac1a-714fa4569e76-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0caf9398-8b50-416e-ac1a-714fa4569e76" (UID: "0caf9398-8b50-416e-ac1a-714fa4569e76"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.168259 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0caf9398-8b50-416e-ac1a-714fa4569e76-kube-api-access-8wcxb" (OuterVolumeSpecName: "kube-api-access-8wcxb") pod "0caf9398-8b50-416e-ac1a-714fa4569e76" (UID: "0caf9398-8b50-416e-ac1a-714fa4569e76"). InnerVolumeSpecName "kube-api-access-8wcxb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.264697 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wcxb\" (UniqueName: \"kubernetes.io/projected/0caf9398-8b50-416e-ac1a-714fa4569e76-kube-api-access-8wcxb\") on node \"crc\" DevicePath \"\""
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.264742 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0caf9398-8b50-416e-ac1a-714fa4569e76-config-volume\") on node \"crc\" DevicePath \"\""
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.264756 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0caf9398-8b50-416e-ac1a-714fa4569e76-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.805399 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747" event={"ID":"0caf9398-8b50-416e-ac1a-714fa4569e76","Type":"ContainerDied","Data":"f20a77e2b7169d5265d79abbbbeb7d91bcec9d150a12cf949550b98c38e21cf3"}
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.805446 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f20a77e2b7169d5265d79abbbbeb7d91bcec9d150a12cf949550b98c38e21cf3"
Jan 29 07:15:03 crc kubenswrapper[4861]: I0129 07:15:03.805446 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"
Jan 29 07:15:04 crc kubenswrapper[4861]: I0129 07:15:04.193768 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4"]
Jan 29 07:15:04 crc kubenswrapper[4861]: I0129 07:15:04.206421 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494470-zsdk4"]
Jan 29 07:15:05 crc kubenswrapper[4861]: I0129 07:15:05.126822 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cead6a1e-5df8-4937-9ee3-71efe555615f" path="/var/lib/kubelet/pods/cead6a1e-5df8-4937-9ee3-71efe555615f/volumes"
Jan 29 07:15:08 crc kubenswrapper[4861]: I0129 07:15:08.117204 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:15:08 crc kubenswrapper[4861]: E0129 07:15:08.117721 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:15:19 crc kubenswrapper[4861]: I0129 07:15:19.119414 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4"
Jan 29 07:15:19 crc kubenswrapper[4861]: E0129 07:15:19.120316 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:15:24 crc kubenswrapper[4861]: I0129 07:15:24.064727 4861 scope.go:117] "RemoveContainer" containerID="e4b916c599c0cc7e6a0d63d8fb6a748a231e2a87702527f7c74b582ca5fb4435" Jan 29 07:15:31 crc kubenswrapper[4861]: I0129 07:15:31.116835 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:15:31 crc kubenswrapper[4861]: E0129 07:15:31.117668 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:15:46 crc kubenswrapper[4861]: I0129 07:15:46.120270 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:15:46 crc kubenswrapper[4861]: E0129 07:15:46.121088 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:15:57 crc kubenswrapper[4861]: I0129 07:15:57.117398 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:15:57 crc kubenswrapper[4861]: E0129 07:15:57.118452 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:16:08 crc kubenswrapper[4861]: I0129 07:16:08.117133 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:16:08 crc kubenswrapper[4861]: E0129 07:16:08.117879 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:16:19 crc kubenswrapper[4861]: I0129 07:16:19.119859 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:16:19 crc kubenswrapper[4861]: E0129 07:16:19.120681 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:16:34 crc kubenswrapper[4861]: I0129 07:16:34.116974 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:16:34 crc kubenswrapper[4861]: E0129 07:16:34.118316 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.224660 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p54w2"] Jan 29 07:16:36 crc kubenswrapper[4861]: E0129 07:16:36.226597 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0caf9398-8b50-416e-ac1a-714fa4569e76" containerName="collect-profiles" Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.226739 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0caf9398-8b50-416e-ac1a-714fa4569e76" containerName="collect-profiles" Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.227096 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0caf9398-8b50-416e-ac1a-714fa4569e76" containerName="collect-profiles" Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.232749 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p54w2" Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.255569 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p54w2"] Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.364680 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnvjd\" (UniqueName: \"kubernetes.io/projected/12919e56-2ccc-4b92-90d5-ca5f535931dc-kube-api-access-gnvjd\") pod \"certified-operators-p54w2\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " pod="openshift-marketplace/certified-operators-p54w2" Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.364737 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-catalog-content\") pod \"certified-operators-p54w2\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " pod="openshift-marketplace/certified-operators-p54w2" Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.364788 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-utilities\") pod \"certified-operators-p54w2\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " pod="openshift-marketplace/certified-operators-p54w2" Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.417828 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lcwqg"] Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.419263 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.438417 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lcwqg"]
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.466098 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-utilities\") pod \"certified-operators-p54w2\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " pod="openshift-marketplace/certified-operators-p54w2"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.466214 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnvjd\" (UniqueName: \"kubernetes.io/projected/12919e56-2ccc-4b92-90d5-ca5f535931dc-kube-api-access-gnvjd\") pod \"certified-operators-p54w2\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " pod="openshift-marketplace/certified-operators-p54w2"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.466244 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-catalog-content\") pod \"certified-operators-p54w2\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " pod="openshift-marketplace/certified-operators-p54w2"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.466629 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-catalog-content\") pod \"certified-operators-p54w2\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " pod="openshift-marketplace/certified-operators-p54w2"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.466852 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-utilities\") pod \"certified-operators-p54w2\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " pod="openshift-marketplace/certified-operators-p54w2"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.489225 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnvjd\" (UniqueName: \"kubernetes.io/projected/12919e56-2ccc-4b92-90d5-ca5f535931dc-kube-api-access-gnvjd\") pod \"certified-operators-p54w2\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " pod="openshift-marketplace/certified-operators-p54w2"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.553979 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p54w2"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.568217 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-catalog-content\") pod \"community-operators-lcwqg\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " pod="openshift-marketplace/community-operators-lcwqg"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.568346 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f69nx\" (UniqueName: \"kubernetes.io/projected/80a5b3a2-0768-4164-9af9-d93a81bf99d1-kube-api-access-f69nx\") pod \"community-operators-lcwqg\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " pod="openshift-marketplace/community-operators-lcwqg"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.568443 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-utilities\") pod \"community-operators-lcwqg\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " pod="openshift-marketplace/community-operators-lcwqg"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.669561 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-utilities\") pod \"community-operators-lcwqg\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " pod="openshift-marketplace/community-operators-lcwqg"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.669660 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-catalog-content\") pod \"community-operators-lcwqg\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " pod="openshift-marketplace/community-operators-lcwqg"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.669702 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f69nx\" (UniqueName: \"kubernetes.io/projected/80a5b3a2-0768-4164-9af9-d93a81bf99d1-kube-api-access-f69nx\") pod \"community-operators-lcwqg\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " pod="openshift-marketplace/community-operators-lcwqg"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.670236 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-utilities\") pod \"community-operators-lcwqg\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " pod="openshift-marketplace/community-operators-lcwqg"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.670236 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-catalog-content\") pod \"community-operators-lcwqg\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " pod="openshift-marketplace/community-operators-lcwqg"
Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.700195 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f69nx\" (UniqueName: \"kubernetes.io/projected/80a5b3a2-0768-4164-9af9-d93a81bf99d1-kube-api-access-f69nx\") pod \"community-operators-lcwqg\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " pod="openshift-marketplace/community-operators-lcwqg"
\"community-operators-lcwqg\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " pod="openshift-marketplace/community-operators-lcwqg" Jan 29 07:16:36 crc kubenswrapper[4861]: I0129 07:16:36.738422 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lcwqg" Jan 29 07:16:37 crc kubenswrapper[4861]: I0129 07:16:37.063995 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p54w2"] Jan 29 07:16:37 crc kubenswrapper[4861]: I0129 07:16:37.219042 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lcwqg"] Jan 29 07:16:37 crc kubenswrapper[4861]: I0129 07:16:37.608286 4861 generic.go:334] "Generic (PLEG): container finished" podID="12919e56-2ccc-4b92-90d5-ca5f535931dc" containerID="467c58de2976897eb087ceaf153257dd52f1af667f30f9e00d8f4f521b2b6dc6" exitCode=0 Jan 29 07:16:37 crc kubenswrapper[4861]: I0129 07:16:37.608637 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p54w2" event={"ID":"12919e56-2ccc-4b92-90d5-ca5f535931dc","Type":"ContainerDied","Data":"467c58de2976897eb087ceaf153257dd52f1af667f30f9e00d8f4f521b2b6dc6"} Jan 29 07:16:37 crc kubenswrapper[4861]: I0129 07:16:37.609032 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p54w2" event={"ID":"12919e56-2ccc-4b92-90d5-ca5f535931dc","Type":"ContainerStarted","Data":"b524842427a20b804186fd4ed6e8406dcda688766120427c11ef564306a5b72d"} Jan 29 07:16:37 crc kubenswrapper[4861]: I0129 07:16:37.611033 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 07:16:37 crc kubenswrapper[4861]: I0129 07:16:37.614124 4861 generic.go:334] "Generic (PLEG): container finished" podID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" containerID="524ebb16ef166b3171c58995a4ca01930111c309393d3145960898d248ff738e" exitCode=0 Jan 29 07:16:37 crc kubenswrapper[4861]: I0129 07:16:37.614243 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcwqg" event={"ID":"80a5b3a2-0768-4164-9af9-d93a81bf99d1","Type":"ContainerDied","Data":"524ebb16ef166b3171c58995a4ca01930111c309393d3145960898d248ff738e"} Jan 29 07:16:37 crc kubenswrapper[4861]: I0129 07:16:37.615314 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcwqg" event={"ID":"80a5b3a2-0768-4164-9af9-d93a81bf99d1","Type":"ContainerStarted","Data":"e92e10f18f523789eb4b66a40ce63de48489a20acc4e377bde05588b7ac27253"} Jan 29 07:16:38 crc kubenswrapper[4861]: I0129 07:16:38.622299 4861 generic.go:334] "Generic (PLEG): container finished" podID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" containerID="81eb9fb4e2be784dfb1db3b2b1c64b2f1c2fd0d122ccf7ff8a62290113e8970e" exitCode=0 Jan 29 07:16:38 crc kubenswrapper[4861]: I0129 07:16:38.622388 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcwqg" event={"ID":"80a5b3a2-0768-4164-9af9-d93a81bf99d1","Type":"ContainerDied","Data":"81eb9fb4e2be784dfb1db3b2b1c64b2f1c2fd0d122ccf7ff8a62290113e8970e"} Jan 29 07:16:39 crc kubenswrapper[4861]: I0129 07:16:39.629980 4861 generic.go:334] "Generic (PLEG): container finished" podID="12919e56-2ccc-4b92-90d5-ca5f535931dc" containerID="84a898d034ded5bca48fd2274b153d6f307f3f96f3b3fa52045e6538326fb2fa" exitCode=0 Jan 29 07:16:39 crc kubenswrapper[4861]: I0129 07:16:39.630106 4861 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p54w2" event={"ID":"12919e56-2ccc-4b92-90d5-ca5f535931dc","Type":"ContainerDied","Data":"84a898d034ded5bca48fd2274b153d6f307f3f96f3b3fa52045e6538326fb2fa"} Jan 29 07:16:39 crc kubenswrapper[4861]: I0129 07:16:39.632319 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcwqg" event={"ID":"80a5b3a2-0768-4164-9af9-d93a81bf99d1","Type":"ContainerStarted","Data":"c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329"} Jan 29 07:16:39 crc kubenswrapper[4861]: I0129 07:16:39.671457 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lcwqg" podStartSLOduration=2.282272341 podStartE2EDuration="3.671439236s" podCreationTimestamp="2026-01-29 07:16:36 +0000 UTC" firstStartedPulling="2026-01-29 07:16:37.618736038 +0000 UTC m=+2489.290230585" lastFinishedPulling="2026-01-29 07:16:39.007902923 +0000 UTC m=+2490.679397480" observedRunningTime="2026-01-29 07:16:39.667504252 +0000 UTC m=+2491.338998829" watchObservedRunningTime="2026-01-29 07:16:39.671439236 +0000 UTC m=+2491.342933793" Jan 29 07:16:40 crc kubenswrapper[4861]: I0129 07:16:40.640975 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p54w2" event={"ID":"12919e56-2ccc-4b92-90d5-ca5f535931dc","Type":"ContainerStarted","Data":"2ff59fc1c499bdcdeb0d12224e7e6b2c1296acfd46377c1936c0af9cbe9e1c63"} Jan 29 07:16:40 crc kubenswrapper[4861]: I0129 07:16:40.660756 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-p54w2" podStartSLOduration=2.155923963 podStartE2EDuration="4.660733422s" podCreationTimestamp="2026-01-29 07:16:36 +0000 UTC" firstStartedPulling="2026-01-29 07:16:37.610793049 +0000 UTC m=+2489.282287606" lastFinishedPulling="2026-01-29 07:16:40.115602508 +0000 UTC m=+2491.787097065" observedRunningTime="2026-01-29 07:16:40.657540388 +0000 UTC m=+2492.329034975" watchObservedRunningTime="2026-01-29 07:16:40.660733422 +0000 UTC m=+2492.332227999" Jan 29 07:16:46 crc kubenswrapper[4861]: I0129 07:16:46.554090 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-p54w2" Jan 29 07:16:46 crc kubenswrapper[4861]: I0129 07:16:46.554805 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-p54w2" Jan 29 07:16:46 crc kubenswrapper[4861]: I0129 07:16:46.610250 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-p54w2" Jan 29 07:16:46 crc kubenswrapper[4861]: I0129 07:16:46.739952 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lcwqg" Jan 29 07:16:46 crc kubenswrapper[4861]: I0129 07:16:46.740057 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lcwqg" Jan 29 07:16:46 crc kubenswrapper[4861]: I0129 07:16:46.751904 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-p54w2" Jan 29 07:16:46 crc kubenswrapper[4861]: I0129 07:16:46.823559 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lcwqg" Jan 29 07:16:47 crc kubenswrapper[4861]: I0129 
07:16:47.117659 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:16:47 crc kubenswrapper[4861]: E0129 07:16:47.118360 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:16:47 crc kubenswrapper[4861]: I0129 07:16:47.771801 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lcwqg" Jan 29 07:16:49 crc kubenswrapper[4861]: I0129 07:16:49.418308 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p54w2"] Jan 29 07:16:49 crc kubenswrapper[4861]: I0129 07:16:49.419113 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-p54w2" podUID="12919e56-2ccc-4b92-90d5-ca5f535931dc" containerName="registry-server" containerID="cri-o://2ff59fc1c499bdcdeb0d12224e7e6b2c1296acfd46377c1936c0af9cbe9e1c63" gracePeriod=2 Jan 29 07:16:49 crc kubenswrapper[4861]: I0129 07:16:49.612672 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lcwqg"] Jan 29 07:16:49 crc kubenswrapper[4861]: I0129 07:16:49.717483 4861 generic.go:334] "Generic (PLEG): container finished" podID="12919e56-2ccc-4b92-90d5-ca5f535931dc" containerID="2ff59fc1c499bdcdeb0d12224e7e6b2c1296acfd46377c1936c0af9cbe9e1c63" exitCode=0 Jan 29 07:16:49 crc kubenswrapper[4861]: I0129 07:16:49.717582 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p54w2" event={"ID":"12919e56-2ccc-4b92-90d5-ca5f535931dc","Type":"ContainerDied","Data":"2ff59fc1c499bdcdeb0d12224e7e6b2c1296acfd46377c1936c0af9cbe9e1c63"} Jan 29 07:16:49 crc kubenswrapper[4861]: I0129 07:16:49.910846 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p54w2" Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.094700 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-utilities\") pod \"12919e56-2ccc-4b92-90d5-ca5f535931dc\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.094740 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-catalog-content\") pod \"12919e56-2ccc-4b92-90d5-ca5f535931dc\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.096220 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-utilities" (OuterVolumeSpecName: "utilities") pod "12919e56-2ccc-4b92-90d5-ca5f535931dc" (UID: "12919e56-2ccc-4b92-90d5-ca5f535931dc"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.101099 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12919e56-2ccc-4b92-90d5-ca5f535931dc-kube-api-access-gnvjd" (OuterVolumeSpecName: "kube-api-access-gnvjd") pod "12919e56-2ccc-4b92-90d5-ca5f535931dc" (UID: "12919e56-2ccc-4b92-90d5-ca5f535931dc"). InnerVolumeSpecName "kube-api-access-gnvjd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.106183 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnvjd\" (UniqueName: \"kubernetes.io/projected/12919e56-2ccc-4b92-90d5-ca5f535931dc-kube-api-access-gnvjd\") pod \"12919e56-2ccc-4b92-90d5-ca5f535931dc\" (UID: \"12919e56-2ccc-4b92-90d5-ca5f535931dc\") " Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.106697 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.106723 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnvjd\" (UniqueName: \"kubernetes.io/projected/12919e56-2ccc-4b92-90d5-ca5f535931dc-kube-api-access-gnvjd\") on node \"crc\" DevicePath \"\"" Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.142194 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12919e56-2ccc-4b92-90d5-ca5f535931dc" (UID: "12919e56-2ccc-4b92-90d5-ca5f535931dc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.207758 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12919e56-2ccc-4b92-90d5-ca5f535931dc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.730250 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p54w2" event={"ID":"12919e56-2ccc-4b92-90d5-ca5f535931dc","Type":"ContainerDied","Data":"b524842427a20b804186fd4ed6e8406dcda688766120427c11ef564306a5b72d"} Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.730340 4861 scope.go:117] "RemoveContainer" containerID="2ff59fc1c499bdcdeb0d12224e7e6b2c1296acfd46377c1936c0af9cbe9e1c63" Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.730416 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p54w2" Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.730478 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lcwqg" podUID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" containerName="registry-server" containerID="cri-o://c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329" gracePeriod=2 Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.768564 4861 scope.go:117] "RemoveContainer" containerID="84a898d034ded5bca48fd2274b153d6f307f3f96f3b3fa52045e6538326fb2fa" Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.786740 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p54w2"] Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.800804 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-p54w2"] Jan 29 07:16:50 crc kubenswrapper[4861]: I0129 07:16:50.811158 4861 scope.go:117] "RemoveContainer" containerID="467c58de2976897eb087ceaf153257dd52f1af667f30f9e00d8f4f521b2b6dc6" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.126366 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12919e56-2ccc-4b92-90d5-ca5f535931dc" path="/var/lib/kubelet/pods/12919e56-2ccc-4b92-90d5-ca5f535931dc/volumes" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.197839 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lcwqg" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.326776 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-catalog-content\") pod \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.327024 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f69nx\" (UniqueName: \"kubernetes.io/projected/80a5b3a2-0768-4164-9af9-d93a81bf99d1-kube-api-access-f69nx\") pod \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.327136 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-utilities\") pod \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\" (UID: \"80a5b3a2-0768-4164-9af9-d93a81bf99d1\") " Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.328118 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-utilities" (OuterVolumeSpecName: "utilities") pod "80a5b3a2-0768-4164-9af9-d93a81bf99d1" (UID: "80a5b3a2-0768-4164-9af9-d93a81bf99d1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.336903 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80a5b3a2-0768-4164-9af9-d93a81bf99d1-kube-api-access-f69nx" (OuterVolumeSpecName: "kube-api-access-f69nx") pod "80a5b3a2-0768-4164-9af9-d93a81bf99d1" (UID: "80a5b3a2-0768-4164-9af9-d93a81bf99d1"). InnerVolumeSpecName "kube-api-access-f69nx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.412230 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "80a5b3a2-0768-4164-9af9-d93a81bf99d1" (UID: "80a5b3a2-0768-4164-9af9-d93a81bf99d1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.428357 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.428405 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f69nx\" (UniqueName: \"kubernetes.io/projected/80a5b3a2-0768-4164-9af9-d93a81bf99d1-kube-api-access-f69nx\") on node \"crc\" DevicePath \"\"" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.428420 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80a5b3a2-0768-4164-9af9-d93a81bf99d1-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.745572 4861 generic.go:334] "Generic (PLEG): container finished" podID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" containerID="c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329" exitCode=0 Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.745667 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcwqg" event={"ID":"80a5b3a2-0768-4164-9af9-d93a81bf99d1","Type":"ContainerDied","Data":"c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329"} Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.745707 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lcwqg" event={"ID":"80a5b3a2-0768-4164-9af9-d93a81bf99d1","Type":"ContainerDied","Data":"e92e10f18f523789eb4b66a40ce63de48489a20acc4e377bde05588b7ac27253"} Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.745735 4861 scope.go:117] "RemoveContainer" containerID="c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.745895 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lcwqg" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.778650 4861 scope.go:117] "RemoveContainer" containerID="81eb9fb4e2be784dfb1db3b2b1c64b2f1c2fd0d122ccf7ff8a62290113e8970e" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.808826 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lcwqg"] Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.820907 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lcwqg"] Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.828863 4861 scope.go:117] "RemoveContainer" containerID="524ebb16ef166b3171c58995a4ca01930111c309393d3145960898d248ff738e" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.874266 4861 scope.go:117] "RemoveContainer" containerID="c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329" Jan 29 07:16:51 crc kubenswrapper[4861]: E0129 07:16:51.878212 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329\": container with ID starting with c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329 not found: ID does not exist" containerID="c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.878262 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329"} err="failed to get container status \"c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329\": rpc error: code = NotFound desc = could not find container \"c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329\": container with ID starting with c6e118c822fd8fdf6d87e1075bba9949b3dd2556049670e0655e3de5f69d6329 not found: ID does not exist" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.878295 4861 scope.go:117] "RemoveContainer" containerID="81eb9fb4e2be784dfb1db3b2b1c64b2f1c2fd0d122ccf7ff8a62290113e8970e" Jan 29 07:16:51 crc kubenswrapper[4861]: E0129 07:16:51.882161 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81eb9fb4e2be784dfb1db3b2b1c64b2f1c2fd0d122ccf7ff8a62290113e8970e\": container with ID starting with 81eb9fb4e2be784dfb1db3b2b1c64b2f1c2fd0d122ccf7ff8a62290113e8970e not found: ID does not exist" containerID="81eb9fb4e2be784dfb1db3b2b1c64b2f1c2fd0d122ccf7ff8a62290113e8970e" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.882190 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81eb9fb4e2be784dfb1db3b2b1c64b2f1c2fd0d122ccf7ff8a62290113e8970e"} err="failed to get container status \"81eb9fb4e2be784dfb1db3b2b1c64b2f1c2fd0d122ccf7ff8a62290113e8970e\": rpc error: code = NotFound desc = could not find container \"81eb9fb4e2be784dfb1db3b2b1c64b2f1c2fd0d122ccf7ff8a62290113e8970e\": container with ID starting with 81eb9fb4e2be784dfb1db3b2b1c64b2f1c2fd0d122ccf7ff8a62290113e8970e not found: ID does not exist" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.882205 4861 scope.go:117] "RemoveContainer" containerID="524ebb16ef166b3171c58995a4ca01930111c309393d3145960898d248ff738e" Jan 29 07:16:51 crc kubenswrapper[4861]: E0129 07:16:51.885157 4861 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"524ebb16ef166b3171c58995a4ca01930111c309393d3145960898d248ff738e\": container with ID starting with 524ebb16ef166b3171c58995a4ca01930111c309393d3145960898d248ff738e not found: ID does not exist" containerID="524ebb16ef166b3171c58995a4ca01930111c309393d3145960898d248ff738e" Jan 29 07:16:51 crc kubenswrapper[4861]: I0129 07:16:51.885179 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"524ebb16ef166b3171c58995a4ca01930111c309393d3145960898d248ff738e"} err="failed to get container status \"524ebb16ef166b3171c58995a4ca01930111c309393d3145960898d248ff738e\": rpc error: code = NotFound desc = could not find container \"524ebb16ef166b3171c58995a4ca01930111c309393d3145960898d248ff738e\": container with ID starting with 524ebb16ef166b3171c58995a4ca01930111c309393d3145960898d248ff738e not found: ID does not exist" Jan 29 07:16:53 crc kubenswrapper[4861]: I0129 07:16:53.133760 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" path="/var/lib/kubelet/pods/80a5b3a2-0768-4164-9af9-d93a81bf99d1/volumes" Jan 29 07:17:01 crc kubenswrapper[4861]: I0129 07:17:01.116583 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:17:01 crc kubenswrapper[4861]: E0129 07:17:01.117520 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:17:15 crc kubenswrapper[4861]: I0129 07:17:15.117344 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:17:15 crc kubenswrapper[4861]: E0129 07:17:15.118691 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:17:29 crc kubenswrapper[4861]: I0129 07:17:29.125464 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:17:29 crc kubenswrapper[4861]: E0129 07:17:29.126560 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:17:40 crc kubenswrapper[4861]: I0129 07:17:40.117289 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:17:41 crc kubenswrapper[4861]: I0129 07:17:41.185569 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"631251c83015ad5aae91e0d1e2a9c95708466a7913966eafee8faa3dc681a06e"} Jan 29 07:20:00 crc kubenswrapper[4861]: I0129 07:20:00.629636 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:20:00 crc kubenswrapper[4861]: I0129 07:20:00.631062 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:20:30 crc kubenswrapper[4861]: I0129 07:20:30.629276 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:20:30 crc kubenswrapper[4861]: I0129 07:20:30.629695 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:21:00 crc kubenswrapper[4861]: I0129 07:21:00.632353 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:21:00 crc kubenswrapper[4861]: I0129 07:21:00.633003 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:21:00 crc kubenswrapper[4861]: I0129 07:21:00.633101 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 07:21:00 crc kubenswrapper[4861]: I0129 07:21:00.633989 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"631251c83015ad5aae91e0d1e2a9c95708466a7913966eafee8faa3dc681a06e"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 07:21:00 crc kubenswrapper[4861]: I0129 07:21:00.634044 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://631251c83015ad5aae91e0d1e2a9c95708466a7913966eafee8faa3dc681a06e" gracePeriod=600 Jan 29 07:21:00 crc kubenswrapper[4861]: I0129 
07:21:00.780951 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="631251c83015ad5aae91e0d1e2a9c95708466a7913966eafee8faa3dc681a06e" exitCode=0 Jan 29 07:21:00 crc kubenswrapper[4861]: I0129 07:21:00.781105 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"631251c83015ad5aae91e0d1e2a9c95708466a7913966eafee8faa3dc681a06e"} Jan 29 07:21:00 crc kubenswrapper[4861]: I0129 07:21:00.781341 4861 scope.go:117] "RemoveContainer" containerID="10e8783698aeb3d457e9335a1abcccb2340b9819fda138750ed21bfc972a03c4" Jan 29 07:21:01 crc kubenswrapper[4861]: I0129 07:21:01.792175 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19"} Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.138919 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dtfpt"] Jan 29 07:22:06 crc kubenswrapper[4861]: E0129 07:22:06.150654 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" containerName="extract-utilities" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.150679 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" containerName="extract-utilities" Jan 29 07:22:06 crc kubenswrapper[4861]: E0129 07:22:06.150697 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" containerName="extract-content" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.150706 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" containerName="extract-content" Jan 29 07:22:06 crc kubenswrapper[4861]: E0129 07:22:06.150724 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12919e56-2ccc-4b92-90d5-ca5f535931dc" containerName="extract-content" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.150735 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="12919e56-2ccc-4b92-90d5-ca5f535931dc" containerName="extract-content" Jan 29 07:22:06 crc kubenswrapper[4861]: E0129 07:22:06.150747 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12919e56-2ccc-4b92-90d5-ca5f535931dc" containerName="extract-utilities" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.150755 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="12919e56-2ccc-4b92-90d5-ca5f535931dc" containerName="extract-utilities" Jan 29 07:22:06 crc kubenswrapper[4861]: E0129 07:22:06.150776 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" containerName="registry-server" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.150783 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" containerName="registry-server" Jan 29 07:22:06 crc kubenswrapper[4861]: E0129 07:22:06.150808 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12919e56-2ccc-4b92-90d5-ca5f535931dc" containerName="registry-server" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.150815 4861 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="12919e56-2ccc-4b92-90d5-ca5f535931dc" containerName="registry-server" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.150992 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="12919e56-2ccc-4b92-90d5-ca5f535931dc" containerName="registry-server" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.151012 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="80a5b3a2-0768-4164-9af9-d93a81bf99d1" containerName="registry-server" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.152415 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.154949 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtfpt"] Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.305147 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-catalog-content\") pod \"redhat-marketplace-dtfpt\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.305239 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-utilities\") pod \"redhat-marketplace-dtfpt\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.305294 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfpp8\" (UniqueName: \"kubernetes.io/projected/c4aa0965-1979-4cf7-8a19-fda7339193c3-kube-api-access-dfpp8\") pod \"redhat-marketplace-dtfpt\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.407245 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-catalog-content\") pod \"redhat-marketplace-dtfpt\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.407344 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-utilities\") pod \"redhat-marketplace-dtfpt\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.407433 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfpp8\" (UniqueName: \"kubernetes.io/projected/c4aa0965-1979-4cf7-8a19-fda7339193c3-kube-api-access-dfpp8\") pod \"redhat-marketplace-dtfpt\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.407725 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-catalog-content\") pod 
\"redhat-marketplace-dtfpt\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.407839 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-utilities\") pod \"redhat-marketplace-dtfpt\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.427834 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfpp8\" (UniqueName: \"kubernetes.io/projected/c4aa0965-1979-4cf7-8a19-fda7339193c3-kube-api-access-dfpp8\") pod \"redhat-marketplace-dtfpt\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.492197 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:06 crc kubenswrapper[4861]: I0129 07:22:06.993124 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtfpt"] Jan 29 07:22:07 crc kubenswrapper[4861]: W0129 07:22:06.999801 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc4aa0965_1979_4cf7_8a19_fda7339193c3.slice/crio-69de5855c35c627448db4b3abd0c788533cc43c1aba596958f2021d0ac11b2e1 WatchSource:0}: Error finding container 69de5855c35c627448db4b3abd0c788533cc43c1aba596958f2021d0ac11b2e1: Status 404 returned error can't find the container with id 69de5855c35c627448db4b3abd0c788533cc43c1aba596958f2021d0ac11b2e1 Jan 29 07:22:07 crc kubenswrapper[4861]: I0129 07:22:07.377600 4861 generic.go:334] "Generic (PLEG): container finished" podID="c4aa0965-1979-4cf7-8a19-fda7339193c3" containerID="54c3fe80543139a0da1e15da5475d0bba1dfa12ae30711dbbbe464ce302d09fd" exitCode=0 Jan 29 07:22:07 crc kubenswrapper[4861]: I0129 07:22:07.377695 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtfpt" event={"ID":"c4aa0965-1979-4cf7-8a19-fda7339193c3","Type":"ContainerDied","Data":"54c3fe80543139a0da1e15da5475d0bba1dfa12ae30711dbbbe464ce302d09fd"} Jan 29 07:22:07 crc kubenswrapper[4861]: I0129 07:22:07.378103 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtfpt" event={"ID":"c4aa0965-1979-4cf7-8a19-fda7339193c3","Type":"ContainerStarted","Data":"69de5855c35c627448db4b3abd0c788533cc43c1aba596958f2021d0ac11b2e1"} Jan 29 07:22:07 crc kubenswrapper[4861]: I0129 07:22:07.380479 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 07:22:08 crc kubenswrapper[4861]: I0129 07:22:08.391273 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtfpt" event={"ID":"c4aa0965-1979-4cf7-8a19-fda7339193c3","Type":"ContainerStarted","Data":"32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e"} Jan 29 07:22:09 crc kubenswrapper[4861]: I0129 07:22:09.400439 4861 generic.go:334] "Generic (PLEG): container finished" podID="c4aa0965-1979-4cf7-8a19-fda7339193c3" containerID="32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e" exitCode=0 Jan 29 07:22:09 crc kubenswrapper[4861]: I0129 07:22:09.400712 4861 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/redhat-marketplace-dtfpt" event={"ID":"c4aa0965-1979-4cf7-8a19-fda7339193c3","Type":"ContainerDied","Data":"32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e"} Jan 29 07:22:10 crc kubenswrapper[4861]: I0129 07:22:10.416990 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtfpt" event={"ID":"c4aa0965-1979-4cf7-8a19-fda7339193c3","Type":"ContainerStarted","Data":"a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101"} Jan 29 07:22:10 crc kubenswrapper[4861]: I0129 07:22:10.451503 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dtfpt" podStartSLOduration=1.916745841 podStartE2EDuration="4.451485349s" podCreationTimestamp="2026-01-29 07:22:06 +0000 UTC" firstStartedPulling="2026-01-29 07:22:07.380152557 +0000 UTC m=+2819.051647124" lastFinishedPulling="2026-01-29 07:22:09.914892075 +0000 UTC m=+2821.586386632" observedRunningTime="2026-01-29 07:22:10.451365916 +0000 UTC m=+2822.122860563" watchObservedRunningTime="2026-01-29 07:22:10.451485349 +0000 UTC m=+2822.122979916" Jan 29 07:22:16 crc kubenswrapper[4861]: I0129 07:22:16.492436 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:16 crc kubenswrapper[4861]: I0129 07:22:16.493197 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:16 crc kubenswrapper[4861]: I0129 07:22:16.544572 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:17 crc kubenswrapper[4861]: I0129 07:22:17.569754 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:17 crc kubenswrapper[4861]: I0129 07:22:17.636269 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtfpt"] Jan 29 07:22:19 crc kubenswrapper[4861]: I0129 07:22:19.528189 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dtfpt" podUID="c4aa0965-1979-4cf7-8a19-fda7339193c3" containerName="registry-server" containerID="cri-o://a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101" gracePeriod=2 Jan 29 07:22:19 crc kubenswrapper[4861]: I0129 07:22:19.996465 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.069387 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dfpp8\" (UniqueName: \"kubernetes.io/projected/c4aa0965-1979-4cf7-8a19-fda7339193c3-kube-api-access-dfpp8\") pod \"c4aa0965-1979-4cf7-8a19-fda7339193c3\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.069751 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-utilities\") pod \"c4aa0965-1979-4cf7-8a19-fda7339193c3\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.069791 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-catalog-content\") pod \"c4aa0965-1979-4cf7-8a19-fda7339193c3\" (UID: \"c4aa0965-1979-4cf7-8a19-fda7339193c3\") " Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.070562 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-utilities" (OuterVolumeSpecName: "utilities") pod "c4aa0965-1979-4cf7-8a19-fda7339193c3" (UID: "c4aa0965-1979-4cf7-8a19-fda7339193c3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.080286 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4aa0965-1979-4cf7-8a19-fda7339193c3-kube-api-access-dfpp8" (OuterVolumeSpecName: "kube-api-access-dfpp8") pod "c4aa0965-1979-4cf7-8a19-fda7339193c3" (UID: "c4aa0965-1979-4cf7-8a19-fda7339193c3"). InnerVolumeSpecName "kube-api-access-dfpp8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.108847 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c4aa0965-1979-4cf7-8a19-fda7339193c3" (UID: "c4aa0965-1979-4cf7-8a19-fda7339193c3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.171585 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.171635 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4aa0965-1979-4cf7-8a19-fda7339193c3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.171658 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dfpp8\" (UniqueName: \"kubernetes.io/projected/c4aa0965-1979-4cf7-8a19-fda7339193c3-kube-api-access-dfpp8\") on node \"crc\" DevicePath \"\"" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.538851 4861 generic.go:334] "Generic (PLEG): container finished" podID="c4aa0965-1979-4cf7-8a19-fda7339193c3" containerID="a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101" exitCode=0 Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.539116 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtfpt" event={"ID":"c4aa0965-1979-4cf7-8a19-fda7339193c3","Type":"ContainerDied","Data":"a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101"} Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.539181 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtfpt" event={"ID":"c4aa0965-1979-4cf7-8a19-fda7339193c3","Type":"ContainerDied","Data":"69de5855c35c627448db4b3abd0c788533cc43c1aba596958f2021d0ac11b2e1"} Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.539205 4861 scope.go:117] "RemoveContainer" containerID="a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.539391 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtfpt" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.561721 4861 scope.go:117] "RemoveContainer" containerID="32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.584556 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtfpt"] Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.590136 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtfpt"] Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.600690 4861 scope.go:117] "RemoveContainer" containerID="54c3fe80543139a0da1e15da5475d0bba1dfa12ae30711dbbbe464ce302d09fd" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.626002 4861 scope.go:117] "RemoveContainer" containerID="a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101" Jan 29 07:22:20 crc kubenswrapper[4861]: E0129 07:22:20.626492 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101\": container with ID starting with a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101 not found: ID does not exist" containerID="a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.626528 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101"} err="failed to get container status \"a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101\": rpc error: code = NotFound desc = could not find container \"a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101\": container with ID starting with a54b81b3e310f6e52e9050450b4859c4815b121782a5b7d10174beceed662101 not found: ID does not exist" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.626547 4861 scope.go:117] "RemoveContainer" containerID="32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e" Jan 29 07:22:20 crc kubenswrapper[4861]: E0129 07:22:20.626848 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e\": container with ID starting with 32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e not found: ID does not exist" containerID="32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.626872 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e"} err="failed to get container status \"32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e\": rpc error: code = NotFound desc = could not find container \"32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e\": container with ID starting with 32cf2def0241ee7fc452deb76a00d3a3ce11a9d72ec30915573fe4390d5f264e not found: ID does not exist" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.626891 4861 scope.go:117] "RemoveContainer" containerID="54c3fe80543139a0da1e15da5475d0bba1dfa12ae30711dbbbe464ce302d09fd" Jan 29 07:22:20 crc kubenswrapper[4861]: E0129 07:22:20.627239 4861 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"54c3fe80543139a0da1e15da5475d0bba1dfa12ae30711dbbbe464ce302d09fd\": container with ID starting with 54c3fe80543139a0da1e15da5475d0bba1dfa12ae30711dbbbe464ce302d09fd not found: ID does not exist" containerID="54c3fe80543139a0da1e15da5475d0bba1dfa12ae30711dbbbe464ce302d09fd" Jan 29 07:22:20 crc kubenswrapper[4861]: I0129 07:22:20.627266 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54c3fe80543139a0da1e15da5475d0bba1dfa12ae30711dbbbe464ce302d09fd"} err="failed to get container status \"54c3fe80543139a0da1e15da5475d0bba1dfa12ae30711dbbbe464ce302d09fd\": rpc error: code = NotFound desc = could not find container \"54c3fe80543139a0da1e15da5475d0bba1dfa12ae30711dbbbe464ce302d09fd\": container with ID starting with 54c3fe80543139a0da1e15da5475d0bba1dfa12ae30711dbbbe464ce302d09fd not found: ID does not exist" Jan 29 07:22:21 crc kubenswrapper[4861]: I0129 07:22:21.139248 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4aa0965-1979-4cf7-8a19-fda7339193c3" path="/var/lib/kubelet/pods/c4aa0965-1979-4cf7-8a19-fda7339193c3/volumes" Jan 29 07:23:00 crc kubenswrapper[4861]: I0129 07:23:00.629782 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:23:00 crc kubenswrapper[4861]: I0129 07:23:00.630478 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:23:03 crc kubenswrapper[4861]: I0129 07:23:03.839223 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9f256"] Jan 29 07:23:03 crc kubenswrapper[4861]: E0129 07:23:03.839991 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4aa0965-1979-4cf7-8a19-fda7339193c3" containerName="extract-content" Jan 29 07:23:03 crc kubenswrapper[4861]: I0129 07:23:03.840013 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4aa0965-1979-4cf7-8a19-fda7339193c3" containerName="extract-content" Jan 29 07:23:03 crc kubenswrapper[4861]: E0129 07:23:03.840046 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4aa0965-1979-4cf7-8a19-fda7339193c3" containerName="extract-utilities" Jan 29 07:23:03 crc kubenswrapper[4861]: I0129 07:23:03.840059 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4aa0965-1979-4cf7-8a19-fda7339193c3" containerName="extract-utilities" Jan 29 07:23:03 crc kubenswrapper[4861]: E0129 07:23:03.840157 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4aa0965-1979-4cf7-8a19-fda7339193c3" containerName="registry-server" Jan 29 07:23:03 crc kubenswrapper[4861]: I0129 07:23:03.840172 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4aa0965-1979-4cf7-8a19-fda7339193c3" containerName="registry-server" Jan 29 07:23:03 crc kubenswrapper[4861]: I0129 07:23:03.840421 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4aa0965-1979-4cf7-8a19-fda7339193c3" containerName="registry-server" Jan 29 07:23:03 crc kubenswrapper[4861]: I0129 
07:23:03.842164 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:03 crc kubenswrapper[4861]: I0129 07:23:03.854595 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9f256"] Jan 29 07:23:03 crc kubenswrapper[4861]: I0129 07:23:03.926108 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmn6l\" (UniqueName: \"kubernetes.io/projected/98c421ac-c4e9-43e5-b76b-a2094fba7539-kube-api-access-wmn6l\") pod \"redhat-operators-9f256\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:03 crc kubenswrapper[4861]: I0129 07:23:03.926185 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-utilities\") pod \"redhat-operators-9f256\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:03 crc kubenswrapper[4861]: I0129 07:23:03.926403 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-catalog-content\") pod \"redhat-operators-9f256\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:04 crc kubenswrapper[4861]: I0129 07:23:04.028041 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-catalog-content\") pod \"redhat-operators-9f256\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:04 crc kubenswrapper[4861]: I0129 07:23:04.028198 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmn6l\" (UniqueName: \"kubernetes.io/projected/98c421ac-c4e9-43e5-b76b-a2094fba7539-kube-api-access-wmn6l\") pod \"redhat-operators-9f256\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:04 crc kubenswrapper[4861]: I0129 07:23:04.028234 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-utilities\") pod \"redhat-operators-9f256\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:04 crc kubenswrapper[4861]: I0129 07:23:04.028763 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-utilities\") pod \"redhat-operators-9f256\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:04 crc kubenswrapper[4861]: I0129 07:23:04.028830 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-catalog-content\") pod \"redhat-operators-9f256\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:04 crc kubenswrapper[4861]: I0129 07:23:04.056654 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmn6l\" (UniqueName: \"kubernetes.io/projected/98c421ac-c4e9-43e5-b76b-a2094fba7539-kube-api-access-wmn6l\") pod \"redhat-operators-9f256\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:04 crc kubenswrapper[4861]: I0129 07:23:04.178674 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:04 crc kubenswrapper[4861]: I0129 07:23:04.420424 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9f256"] Jan 29 07:23:04 crc kubenswrapper[4861]: I0129 07:23:04.953709 4861 generic.go:334] "Generic (PLEG): container finished" podID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerID="6d8ab2e27c8c4758d2bd290515b27d87faf58fdfcf4596ab14dad15456309674" exitCode=0 Jan 29 07:23:04 crc kubenswrapper[4861]: I0129 07:23:04.953841 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9f256" event={"ID":"98c421ac-c4e9-43e5-b76b-a2094fba7539","Type":"ContainerDied","Data":"6d8ab2e27c8c4758d2bd290515b27d87faf58fdfcf4596ab14dad15456309674"} Jan 29 07:23:04 crc kubenswrapper[4861]: I0129 07:23:04.954019 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9f256" event={"ID":"98c421ac-c4e9-43e5-b76b-a2094fba7539","Type":"ContainerStarted","Data":"fee1f3765e9410f20c530708b63bc49e561027703c2567556abdf417f57ced79"} Jan 29 07:23:05 crc kubenswrapper[4861]: I0129 07:23:05.972696 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9f256" event={"ID":"98c421ac-c4e9-43e5-b76b-a2094fba7539","Type":"ContainerStarted","Data":"276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309"} Jan 29 07:23:06 crc kubenswrapper[4861]: I0129 07:23:06.984587 4861 generic.go:334] "Generic (PLEG): container finished" podID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerID="276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309" exitCode=0 Jan 29 07:23:06 crc kubenswrapper[4861]: I0129 07:23:06.984640 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9f256" event={"ID":"98c421ac-c4e9-43e5-b76b-a2094fba7539","Type":"ContainerDied","Data":"276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309"} Jan 29 07:23:07 crc kubenswrapper[4861]: I0129 07:23:07.999299 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9f256" event={"ID":"98c421ac-c4e9-43e5-b76b-a2094fba7539","Type":"ContainerStarted","Data":"abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32"} Jan 29 07:23:14 crc kubenswrapper[4861]: I0129 07:23:14.179672 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:14 crc kubenswrapper[4861]: I0129 07:23:14.180231 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:15 crc kubenswrapper[4861]: I0129 07:23:15.220003 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9f256" podUID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerName="registry-server" probeResult="failure" output=< Jan 29 07:23:15 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 
07:23:15 crc kubenswrapper[4861]: > Jan 29 07:23:24 crc kubenswrapper[4861]: I0129 07:23:24.240396 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:24 crc kubenswrapper[4861]: I0129 07:23:24.266319 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9f256" podStartSLOduration=18.833507546 podStartE2EDuration="21.266300258s" podCreationTimestamp="2026-01-29 07:23:03 +0000 UTC" firstStartedPulling="2026-01-29 07:23:04.956442044 +0000 UTC m=+2876.627936601" lastFinishedPulling="2026-01-29 07:23:07.389234756 +0000 UTC m=+2879.060729313" observedRunningTime="2026-01-29 07:23:08.027567392 +0000 UTC m=+2879.699062009" watchObservedRunningTime="2026-01-29 07:23:24.266300258 +0000 UTC m=+2895.937794825" Jan 29 07:23:24 crc kubenswrapper[4861]: I0129 07:23:24.294022 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.158126 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9f256"] Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.162390 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9f256" podUID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerName="registry-server" containerID="cri-o://abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32" gracePeriod=2 Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.597220 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.786159 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-catalog-content\") pod \"98c421ac-c4e9-43e5-b76b-a2094fba7539\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.786262 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmn6l\" (UniqueName: \"kubernetes.io/projected/98c421ac-c4e9-43e5-b76b-a2094fba7539-kube-api-access-wmn6l\") pod \"98c421ac-c4e9-43e5-b76b-a2094fba7539\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.786352 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-utilities\") pod \"98c421ac-c4e9-43e5-b76b-a2094fba7539\" (UID: \"98c421ac-c4e9-43e5-b76b-a2094fba7539\") " Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.787846 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-utilities" (OuterVolumeSpecName: "utilities") pod "98c421ac-c4e9-43e5-b76b-a2094fba7539" (UID: "98c421ac-c4e9-43e5-b76b-a2094fba7539"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.791720 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98c421ac-c4e9-43e5-b76b-a2094fba7539-kube-api-access-wmn6l" (OuterVolumeSpecName: "kube-api-access-wmn6l") pod "98c421ac-c4e9-43e5-b76b-a2094fba7539" (UID: "98c421ac-c4e9-43e5-b76b-a2094fba7539"). InnerVolumeSpecName "kube-api-access-wmn6l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.887824 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmn6l\" (UniqueName: \"kubernetes.io/projected/98c421ac-c4e9-43e5-b76b-a2094fba7539-kube-api-access-wmn6l\") on node \"crc\" DevicePath \"\"" Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.887873 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.969579 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "98c421ac-c4e9-43e5-b76b-a2094fba7539" (UID: "98c421ac-c4e9-43e5-b76b-a2094fba7539"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:23:26 crc kubenswrapper[4861]: I0129 07:23:26.988899 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98c421ac-c4e9-43e5-b76b-a2094fba7539-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.170356 4861 generic.go:334] "Generic (PLEG): container finished" podID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerID="abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32" exitCode=0 Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.170408 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9f256" event={"ID":"98c421ac-c4e9-43e5-b76b-a2094fba7539","Type":"ContainerDied","Data":"abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32"} Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.170468 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9f256" event={"ID":"98c421ac-c4e9-43e5-b76b-a2094fba7539","Type":"ContainerDied","Data":"fee1f3765e9410f20c530708b63bc49e561027703c2567556abdf417f57ced79"} Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.170489 4861 scope.go:117] "RemoveContainer" containerID="abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32" Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.170494 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9f256" Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.191200 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9f256"] Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.193327 4861 scope.go:117] "RemoveContainer" containerID="276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309" Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.222552 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9f256"] Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.225326 4861 scope.go:117] "RemoveContainer" containerID="6d8ab2e27c8c4758d2bd290515b27d87faf58fdfcf4596ab14dad15456309674" Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.258169 4861 scope.go:117] "RemoveContainer" containerID="abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32" Jan 29 07:23:27 crc kubenswrapper[4861]: E0129 07:23:27.258867 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32\": container with ID starting with abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32 not found: ID does not exist" containerID="abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32" Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.258915 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32"} err="failed to get container status \"abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32\": rpc error: code = NotFound desc = could not find container \"abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32\": container with ID starting with abb3d11c6aa5c50db9e875bb2caf054ae911a3b783393ba2707115204b511e32 not found: ID does not exist" Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.258943 4861 scope.go:117] "RemoveContainer" containerID="276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309" Jan 29 07:23:27 crc kubenswrapper[4861]: E0129 07:23:27.259361 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309\": container with ID starting with 276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309 not found: ID does not exist" containerID="276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309" Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.259402 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309"} err="failed to get container status \"276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309\": rpc error: code = NotFound desc = could not find container \"276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309\": container with ID starting with 276015e49cc9779253343fbc6e5f021a28481534e7bb827352f6b59f190c7309 not found: ID does not exist" Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.259427 4861 scope.go:117] "RemoveContainer" containerID="6d8ab2e27c8c4758d2bd290515b27d87faf58fdfcf4596ab14dad15456309674" Jan 29 07:23:27 crc kubenswrapper[4861]: E0129 07:23:27.259723 4861 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"6d8ab2e27c8c4758d2bd290515b27d87faf58fdfcf4596ab14dad15456309674\": container with ID starting with 6d8ab2e27c8c4758d2bd290515b27d87faf58fdfcf4596ab14dad15456309674 not found: ID does not exist" containerID="6d8ab2e27c8c4758d2bd290515b27d87faf58fdfcf4596ab14dad15456309674" Jan 29 07:23:27 crc kubenswrapper[4861]: I0129 07:23:27.259767 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d8ab2e27c8c4758d2bd290515b27d87faf58fdfcf4596ab14dad15456309674"} err="failed to get container status \"6d8ab2e27c8c4758d2bd290515b27d87faf58fdfcf4596ab14dad15456309674\": rpc error: code = NotFound desc = could not find container \"6d8ab2e27c8c4758d2bd290515b27d87faf58fdfcf4596ab14dad15456309674\": container with ID starting with 6d8ab2e27c8c4758d2bd290515b27d87faf58fdfcf4596ab14dad15456309674 not found: ID does not exist" Jan 29 07:23:29 crc kubenswrapper[4861]: I0129 07:23:29.134545 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98c421ac-c4e9-43e5-b76b-a2094fba7539" path="/var/lib/kubelet/pods/98c421ac-c4e9-43e5-b76b-a2094fba7539/volumes" Jan 29 07:23:30 crc kubenswrapper[4861]: I0129 07:23:30.629478 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:23:30 crc kubenswrapper[4861]: I0129 07:23:30.630387 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:24:00 crc kubenswrapper[4861]: I0129 07:24:00.630633 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:24:00 crc kubenswrapper[4861]: I0129 07:24:00.631681 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:24:00 crc kubenswrapper[4861]: I0129 07:24:00.631768 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 07:24:00 crc kubenswrapper[4861]: I0129 07:24:00.633030 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 07:24:00 crc kubenswrapper[4861]: I0129 07:24:00.633202 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" 
podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" gracePeriod=600 Jan 29 07:24:00 crc kubenswrapper[4861]: E0129 07:24:00.757743 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:24:01 crc kubenswrapper[4861]: I0129 07:24:01.460861 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" exitCode=0 Jan 29 07:24:01 crc kubenswrapper[4861]: I0129 07:24:01.460940 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19"} Jan 29 07:24:01 crc kubenswrapper[4861]: I0129 07:24:01.461357 4861 scope.go:117] "RemoveContainer" containerID="631251c83015ad5aae91e0d1e2a9c95708466a7913966eafee8faa3dc681a06e" Jan 29 07:24:01 crc kubenswrapper[4861]: I0129 07:24:01.461960 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:24:01 crc kubenswrapper[4861]: E0129 07:24:01.462327 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:24:16 crc kubenswrapper[4861]: I0129 07:24:16.117541 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:24:16 crc kubenswrapper[4861]: E0129 07:24:16.118250 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:24:31 crc kubenswrapper[4861]: I0129 07:24:31.116615 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:24:31 crc kubenswrapper[4861]: E0129 07:24:31.117595 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:24:45 crc kubenswrapper[4861]: I0129 07:24:45.116794 4861 scope.go:117] 
"RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:24:45 crc kubenswrapper[4861]: E0129 07:24:45.117449 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:24:56 crc kubenswrapper[4861]: I0129 07:24:56.117244 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:24:56 crc kubenswrapper[4861]: E0129 07:24:56.118584 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:25:11 crc kubenswrapper[4861]: I0129 07:25:11.120114 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:25:11 crc kubenswrapper[4861]: E0129 07:25:11.120822 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:25:22 crc kubenswrapper[4861]: I0129 07:25:22.117003 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:25:22 crc kubenswrapper[4861]: E0129 07:25:22.117518 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:25:37 crc kubenswrapper[4861]: I0129 07:25:37.116620 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:25:37 crc kubenswrapper[4861]: E0129 07:25:37.117446 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:25:48 crc kubenswrapper[4861]: I0129 07:25:48.116693 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:25:48 crc kubenswrapper[4861]: E0129 07:25:48.117841 4861 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:26:02 crc kubenswrapper[4861]: I0129 07:26:02.116396 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:26:02 crc kubenswrapper[4861]: E0129 07:26:02.117067 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:26:15 crc kubenswrapper[4861]: I0129 07:26:15.116675 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:26:15 crc kubenswrapper[4861]: E0129 07:26:15.117458 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:26:26 crc kubenswrapper[4861]: I0129 07:26:26.118207 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:26:26 crc kubenswrapper[4861]: E0129 07:26:26.119644 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:26:38 crc kubenswrapper[4861]: I0129 07:26:38.116797 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:26:38 crc kubenswrapper[4861]: E0129 07:26:38.118138 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:26:53 crc kubenswrapper[4861]: I0129 07:26:53.116126 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:26:53 crc kubenswrapper[4861]: E0129 07:26:53.117133 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:27:08 crc kubenswrapper[4861]: I0129 07:27:08.116511 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:27:08 crc kubenswrapper[4861]: E0129 07:27:08.117369 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:27:19 crc kubenswrapper[4861]: I0129 07:27:19.121760 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:27:19 crc kubenswrapper[4861]: E0129 07:27:19.124603 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:27:31 crc kubenswrapper[4861]: I0129 07:27:31.116580 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:27:31 crc kubenswrapper[4861]: E0129 07:27:31.117310 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.027676 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7hjc9"] Jan 29 07:27:42 crc kubenswrapper[4861]: E0129 07:27:42.028529 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerName="extract-utilities" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.028546 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerName="extract-utilities" Jan 29 07:27:42 crc kubenswrapper[4861]: E0129 07:27:42.028559 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerName="registry-server" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.028568 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerName="registry-server" Jan 29 07:27:42 crc kubenswrapper[4861]: E0129 07:27:42.028589 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerName="extract-content" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.028597 4861 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerName="extract-content" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.028789 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="98c421ac-c4e9-43e5-b76b-a2094fba7539" containerName="registry-server" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.030046 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.034046 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7hjc9"] Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.190307 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-catalog-content\") pod \"certified-operators-7hjc9\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.190395 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-utilities\") pod \"certified-operators-7hjc9\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.190450 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls2ns\" (UniqueName: \"kubernetes.io/projected/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-kube-api-access-ls2ns\") pod \"certified-operators-7hjc9\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.291894 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-catalog-content\") pod \"certified-operators-7hjc9\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.292011 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-utilities\") pod \"certified-operators-7hjc9\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.292039 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls2ns\" (UniqueName: \"kubernetes.io/projected/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-kube-api-access-ls2ns\") pod \"certified-operators-7hjc9\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.292674 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-catalog-content\") pod \"certified-operators-7hjc9\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.292790 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-utilities\") pod \"certified-operators-7hjc9\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.316882 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls2ns\" (UniqueName: \"kubernetes.io/projected/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-kube-api-access-ls2ns\") pod \"certified-operators-7hjc9\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.346386 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:42 crc kubenswrapper[4861]: I0129 07:27:42.661405 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7hjc9"] Jan 29 07:27:43 crc kubenswrapper[4861]: I0129 07:27:43.324736 4861 generic.go:334] "Generic (PLEG): container finished" podID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" containerID="bdf0de61f3166c632f9e027097f0a1fea69407457d7da8e761f34d40bc134479" exitCode=0 Jan 29 07:27:43 crc kubenswrapper[4861]: I0129 07:27:43.324802 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hjc9" event={"ID":"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0","Type":"ContainerDied","Data":"bdf0de61f3166c632f9e027097f0a1fea69407457d7da8e761f34d40bc134479"} Jan 29 07:27:43 crc kubenswrapper[4861]: I0129 07:27:43.325086 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hjc9" event={"ID":"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0","Type":"ContainerStarted","Data":"105208191dd1ba278e7ea65adc0979435c0375e0cac657ce317195667e43d28d"} Jan 29 07:27:43 crc kubenswrapper[4861]: I0129 07:27:43.326569 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 07:27:44 crc kubenswrapper[4861]: I0129 07:27:44.334580 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hjc9" event={"ID":"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0","Type":"ContainerStarted","Data":"ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc"} Jan 29 07:27:45 crc kubenswrapper[4861]: I0129 07:27:45.117647 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:27:45 crc kubenswrapper[4861]: E0129 07:27:45.117975 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:27:45 crc kubenswrapper[4861]: I0129 07:27:45.349425 4861 generic.go:334] "Generic (PLEG): container finished" podID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" containerID="ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc" exitCode=0 Jan 29 07:27:45 crc kubenswrapper[4861]: I0129 07:27:45.349475 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-7hjc9" event={"ID":"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0","Type":"ContainerDied","Data":"ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc"} Jan 29 07:27:46 crc kubenswrapper[4861]: I0129 07:27:46.356687 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hjc9" event={"ID":"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0","Type":"ContainerStarted","Data":"57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613"} Jan 29 07:27:46 crc kubenswrapper[4861]: I0129 07:27:46.377958 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7hjc9" podStartSLOduration=1.637509574 podStartE2EDuration="4.377936302s" podCreationTimestamp="2026-01-29 07:27:42 +0000 UTC" firstStartedPulling="2026-01-29 07:27:43.326351952 +0000 UTC m=+3154.997846509" lastFinishedPulling="2026-01-29 07:27:46.06677868 +0000 UTC m=+3157.738273237" observedRunningTime="2026-01-29 07:27:46.37292891 +0000 UTC m=+3158.044423467" watchObservedRunningTime="2026-01-29 07:27:46.377936302 +0000 UTC m=+3158.049430859" Jan 29 07:27:52 crc kubenswrapper[4861]: I0129 07:27:52.346757 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:52 crc kubenswrapper[4861]: I0129 07:27:52.347634 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:52 crc kubenswrapper[4861]: I0129 07:27:52.409785 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:53 crc kubenswrapper[4861]: I0129 07:27:53.481161 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:53 crc kubenswrapper[4861]: I0129 07:27:53.554661 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7hjc9"] Jan 29 07:27:55 crc kubenswrapper[4861]: I0129 07:27:55.426142 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7hjc9" podUID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" containerName="registry-server" containerID="cri-o://57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613" gracePeriod=2 Jan 29 07:27:55 crc kubenswrapper[4861]: I0129 07:27:55.932444 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.120342 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-utilities\") pod \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.120458 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ls2ns\" (UniqueName: \"kubernetes.io/projected/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-kube-api-access-ls2ns\") pod \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.120537 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-catalog-content\") pod \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\" (UID: \"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0\") " Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.122268 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-utilities" (OuterVolumeSpecName: "utilities") pod "acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" (UID: "acf704e8-ffd0-4ac2-93e0-02cd882c7ce0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.130446 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-kube-api-access-ls2ns" (OuterVolumeSpecName: "kube-api-access-ls2ns") pod "acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" (UID: "acf704e8-ffd0-4ac2-93e0-02cd882c7ce0"). InnerVolumeSpecName "kube-api-access-ls2ns". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.167843 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" (UID: "acf704e8-ffd0-4ac2-93e0-02cd882c7ce0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.223497 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.223537 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ls2ns\" (UniqueName: \"kubernetes.io/projected/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-kube-api-access-ls2ns\") on node \"crc\" DevicePath \"\"" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.223554 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.438543 4861 generic.go:334] "Generic (PLEG): container finished" podID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" containerID="57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613" exitCode=0 Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.438602 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hjc9" event={"ID":"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0","Type":"ContainerDied","Data":"57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613"} Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.438620 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7hjc9" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.438643 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7hjc9" event={"ID":"acf704e8-ffd0-4ac2-93e0-02cd882c7ce0","Type":"ContainerDied","Data":"105208191dd1ba278e7ea65adc0979435c0375e0cac657ce317195667e43d28d"} Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.438677 4861 scope.go:117] "RemoveContainer" containerID="57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.472537 4861 scope.go:117] "RemoveContainer" containerID="ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.493678 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7hjc9"] Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.503118 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7hjc9"] Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.524673 4861 scope.go:117] "RemoveContainer" containerID="bdf0de61f3166c632f9e027097f0a1fea69407457d7da8e761f34d40bc134479" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.576583 4861 scope.go:117] "RemoveContainer" containerID="57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613" Jan 29 07:27:56 crc kubenswrapper[4861]: E0129 07:27:56.577367 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613\": container with ID starting with 57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613 not found: ID does not exist" containerID="57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.577403 
4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613"} err="failed to get container status \"57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613\": rpc error: code = NotFound desc = could not find container \"57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613\": container with ID starting with 57a632d3909163757a8ff7506d73b778ebcddadd42cb7ea09d38399229722613 not found: ID does not exist" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.577422 4861 scope.go:117] "RemoveContainer" containerID="ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc" Jan 29 07:27:56 crc kubenswrapper[4861]: E0129 07:27:56.577955 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc\": container with ID starting with ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc not found: ID does not exist" containerID="ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.578066 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc"} err="failed to get container status \"ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc\": rpc error: code = NotFound desc = could not find container \"ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc\": container with ID starting with ff3a71be7d1b90e016e7847d7b2690cf34d59aebe7019b8b97d78285147380cc not found: ID does not exist" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.578211 4861 scope.go:117] "RemoveContainer" containerID="bdf0de61f3166c632f9e027097f0a1fea69407457d7da8e761f34d40bc134479" Jan 29 07:27:56 crc kubenswrapper[4861]: E0129 07:27:56.579398 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdf0de61f3166c632f9e027097f0a1fea69407457d7da8e761f34d40bc134479\": container with ID starting with bdf0de61f3166c632f9e027097f0a1fea69407457d7da8e761f34d40bc134479 not found: ID does not exist" containerID="bdf0de61f3166c632f9e027097f0a1fea69407457d7da8e761f34d40bc134479" Jan 29 07:27:56 crc kubenswrapper[4861]: I0129 07:27:56.579479 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdf0de61f3166c632f9e027097f0a1fea69407457d7da8e761f34d40bc134479"} err="failed to get container status \"bdf0de61f3166c632f9e027097f0a1fea69407457d7da8e761f34d40bc134479\": rpc error: code = NotFound desc = could not find container \"bdf0de61f3166c632f9e027097f0a1fea69407457d7da8e761f34d40bc134479\": container with ID starting with bdf0de61f3166c632f9e027097f0a1fea69407457d7da8e761f34d40bc134479 not found: ID does not exist" Jan 29 07:27:57 crc kubenswrapper[4861]: I0129 07:27:57.129428 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" path="/var/lib/kubelet/pods/acf704e8-ffd0-4ac2-93e0-02cd882c7ce0/volumes" Jan 29 07:27:58 crc kubenswrapper[4861]: I0129 07:27:58.116884 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:27:58 crc kubenswrapper[4861]: E0129 07:27:58.117744 4861 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:28:12 crc kubenswrapper[4861]: I0129 07:28:12.117020 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:28:12 crc kubenswrapper[4861]: E0129 07:28:12.118059 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:28:23 crc kubenswrapper[4861]: I0129 07:28:23.116547 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:28:23 crc kubenswrapper[4861]: E0129 07:28:23.118389 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:28:35 crc kubenswrapper[4861]: I0129 07:28:35.116604 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:28:35 crc kubenswrapper[4861]: E0129 07:28:35.117280 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.193406 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rz5d4"] Jan 29 07:28:43 crc kubenswrapper[4861]: E0129 07:28:43.194173 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" containerName="extract-content" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.194185 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" containerName="extract-content" Jan 29 07:28:43 crc kubenswrapper[4861]: E0129 07:28:43.194234 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" containerName="registry-server" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.194241 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" containerName="registry-server" Jan 29 07:28:43 crc kubenswrapper[4861]: E0129 07:28:43.194249 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" 
containerName="extract-utilities" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.194255 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" containerName="extract-utilities" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.194388 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="acf704e8-ffd0-4ac2-93e0-02cd882c7ce0" containerName="registry-server" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.195447 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.252637 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rz5d4"] Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.372290 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-catalog-content\") pod \"community-operators-rz5d4\" (UID: \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.372339 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-utilities\") pod \"community-operators-rz5d4\" (UID: \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.372363 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzr45\" (UniqueName: \"kubernetes.io/projected/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-kube-api-access-kzr45\") pod \"community-operators-rz5d4\" (UID: \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.474348 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-catalog-content\") pod \"community-operators-rz5d4\" (UID: \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.474429 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-utilities\") pod \"community-operators-rz5d4\" (UID: \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.474473 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzr45\" (UniqueName: \"kubernetes.io/projected/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-kube-api-access-kzr45\") pod \"community-operators-rz5d4\" (UID: \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.474884 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-catalog-content\") pod \"community-operators-rz5d4\" (UID: 
\"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.474894 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-utilities\") pod \"community-operators-rz5d4\" (UID: \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.499265 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzr45\" (UniqueName: \"kubernetes.io/projected/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-kube-api-access-kzr45\") pod \"community-operators-rz5d4\" (UID: \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:43 crc kubenswrapper[4861]: I0129 07:28:43.523169 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:44 crc kubenswrapper[4861]: I0129 07:28:44.062841 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rz5d4"] Jan 29 07:28:44 crc kubenswrapper[4861]: I0129 07:28:44.844682 4861 generic.go:334] "Generic (PLEG): container finished" podID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" containerID="cd02ad086546cafa2f9afb5bd7b721f3622fbba7cd83b01d55021d725bfb1478" exitCode=0 Jan 29 07:28:44 crc kubenswrapper[4861]: I0129 07:28:44.844795 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rz5d4" event={"ID":"6b977256-8e4b-4779-9c1e-cb0fc1f464e1","Type":"ContainerDied","Data":"cd02ad086546cafa2f9afb5bd7b721f3622fbba7cd83b01d55021d725bfb1478"} Jan 29 07:28:44 crc kubenswrapper[4861]: I0129 07:28:44.844987 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rz5d4" event={"ID":"6b977256-8e4b-4779-9c1e-cb0fc1f464e1","Type":"ContainerStarted","Data":"bfcc2a385ecddd7d3f7196e364a7a3e8646b3f082848044ee444c3bbe0095efc"} Jan 29 07:28:45 crc kubenswrapper[4861]: I0129 07:28:45.857109 4861 generic.go:334] "Generic (PLEG): container finished" podID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" containerID="c9de7c2211df2d7537cf7ad2a7f8008c570df6859f15d14938976aa79fe1be5f" exitCode=0 Jan 29 07:28:45 crc kubenswrapper[4861]: I0129 07:28:45.857233 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rz5d4" event={"ID":"6b977256-8e4b-4779-9c1e-cb0fc1f464e1","Type":"ContainerDied","Data":"c9de7c2211df2d7537cf7ad2a7f8008c570df6859f15d14938976aa79fe1be5f"} Jan 29 07:28:46 crc kubenswrapper[4861]: I0129 07:28:46.864458 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rz5d4" event={"ID":"6b977256-8e4b-4779-9c1e-cb0fc1f464e1","Type":"ContainerStarted","Data":"2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7"} Jan 29 07:28:46 crc kubenswrapper[4861]: I0129 07:28:46.883912 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rz5d4" podStartSLOduration=2.489138782 podStartE2EDuration="3.883893427s" podCreationTimestamp="2026-01-29 07:28:43 +0000 UTC" firstStartedPulling="2026-01-29 07:28:44.846179746 +0000 UTC m=+3216.517674303" lastFinishedPulling="2026-01-29 07:28:46.240934381 +0000 UTC m=+3217.912428948" observedRunningTime="2026-01-29 
Jan 29 07:28:49 crc kubenswrapper[4861]: I0129 07:28:49.124055 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19"
Jan 29 07:28:49 crc kubenswrapper[4861]: E0129 07:28:49.124863 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:28:53 crc kubenswrapper[4861]: I0129 07:28:53.523762 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rz5d4"
Jan 29 07:28:53 crc kubenswrapper[4861]: I0129 07:28:53.525726 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rz5d4"
Jan 29 07:28:53 crc kubenswrapper[4861]: I0129 07:28:53.585628 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rz5d4"
Jan 29 07:28:53 crc kubenswrapper[4861]: I0129 07:28:53.983578 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rz5d4"
Jan 29 07:28:54 crc kubenswrapper[4861]: I0129 07:28:54.381523 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rz5d4"]
Jan 29 07:28:55 crc kubenswrapper[4861]: I0129 07:28:55.935145 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rz5d4" podUID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" containerName="registry-server" containerID="cri-o://2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7" gracePeriod=2
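On the API DELETE the kubelet stops registry-server with the pod's grace period (gracePeriod=2 here): a termination signal first, then a hard kill when the deadline expires. A process-level sketch of that escalation (plain os/exec and signals, not the actual CRI StopContainer path):

```go
package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace mimics a grace-period stop: SIGTERM, wait up to grace, then SIGKILL.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	cmd.Process.Signal(syscall.SIGTERM) // polite request to shut down
	select {
	case <-done:
		fmt.Println("exited within grace period")
	case <-time.After(grace):
		cmd.Process.Kill() // deadline passed: SIGKILL
		<-done
		fmt.Println("hard-killed after", grace)
	}
}

func main() {
	cmd := exec.Command("sleep", "30") // stand-in for the container process
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGrace(cmd, 2*time.Second) // gracePeriod=2, as in the log
}
```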
Need to start a new one" pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.786707 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-catalog-content\") pod \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\" (UID: \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.786955 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzr45\" (UniqueName: \"kubernetes.io/projected/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-kube-api-access-kzr45\") pod \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\" (UID: \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.787033 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-utilities\") pod \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\" (UID: \"6b977256-8e4b-4779-9c1e-cb0fc1f464e1\") " Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.787845 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-utilities" (OuterVolumeSpecName: "utilities") pod "6b977256-8e4b-4779-9c1e-cb0fc1f464e1" (UID: "6b977256-8e4b-4779-9c1e-cb0fc1f464e1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.795407 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-kube-api-access-kzr45" (OuterVolumeSpecName: "kube-api-access-kzr45") pod "6b977256-8e4b-4779-9c1e-cb0fc1f464e1" (UID: "6b977256-8e4b-4779-9c1e-cb0fc1f464e1"). InnerVolumeSpecName "kube-api-access-kzr45". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.877380 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6b977256-8e4b-4779-9c1e-cb0fc1f464e1" (UID: "6b977256-8e4b-4779-9c1e-cb0fc1f464e1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.888749 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzr45\" (UniqueName: \"kubernetes.io/projected/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-kube-api-access-kzr45\") on node \"crc\" DevicePath \"\"" Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.888816 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.888830 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b977256-8e4b-4779-9c1e-cb0fc1f464e1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.946340 4861 generic.go:334] "Generic (PLEG): container finished" podID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" containerID="2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7" exitCode=0 Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.946426 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rz5d4" Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.946436 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rz5d4" event={"ID":"6b977256-8e4b-4779-9c1e-cb0fc1f464e1","Type":"ContainerDied","Data":"2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7"} Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.946502 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rz5d4" event={"ID":"6b977256-8e4b-4779-9c1e-cb0fc1f464e1","Type":"ContainerDied","Data":"bfcc2a385ecddd7d3f7196e364a7a3e8646b3f082848044ee444c3bbe0095efc"} Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.946524 4861 scope.go:117] "RemoveContainer" containerID="2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7" Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.965991 4861 scope.go:117] "RemoveContainer" containerID="c9de7c2211df2d7537cf7ad2a7f8008c570df6859f15d14938976aa79fe1be5f" Jan 29 07:28:56 crc kubenswrapper[4861]: I0129 07:28:56.995587 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rz5d4"] Jan 29 07:28:57 crc kubenswrapper[4861]: I0129 07:28:57.000001 4861 scope.go:117] "RemoveContainer" containerID="cd02ad086546cafa2f9afb5bd7b721f3622fbba7cd83b01d55021d725bfb1478" Jan 29 07:28:57 crc kubenswrapper[4861]: I0129 07:28:57.006455 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rz5d4"] Jan 29 07:28:57 crc kubenswrapper[4861]: I0129 07:28:57.029578 4861 scope.go:117] "RemoveContainer" containerID="2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7" Jan 29 07:28:57 crc kubenswrapper[4861]: E0129 07:28:57.030181 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7\": container with ID starting with 2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7 not found: ID does not exist" containerID="2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7" Jan 29 07:28:57 crc kubenswrapper[4861]: I0129 07:28:57.030243 
Jan 29 07:28:57 crc kubenswrapper[4861]: I0129 07:28:57.030243 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7"} err="failed to get container status \"2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7\": rpc error: code = NotFound desc = could not find container \"2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7\": container with ID starting with 2d450b1cd8e652782d02ddd2878617077ed40dfbd4e241cbefb244323427ffe7 not found: ID does not exist"
Jan 29 07:28:57 crc kubenswrapper[4861]: I0129 07:28:57.030322 4861 scope.go:117] "RemoveContainer" containerID="c9de7c2211df2d7537cf7ad2a7f8008c570df6859f15d14938976aa79fe1be5f"
Jan 29 07:28:57 crc kubenswrapper[4861]: E0129 07:28:57.030678 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9de7c2211df2d7537cf7ad2a7f8008c570df6859f15d14938976aa79fe1be5f\": container with ID starting with c9de7c2211df2d7537cf7ad2a7f8008c570df6859f15d14938976aa79fe1be5f not found: ID does not exist" containerID="c9de7c2211df2d7537cf7ad2a7f8008c570df6859f15d14938976aa79fe1be5f"
Jan 29 07:28:57 crc kubenswrapper[4861]: I0129 07:28:57.030832 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9de7c2211df2d7537cf7ad2a7f8008c570df6859f15d14938976aa79fe1be5f"} err="failed to get container status \"c9de7c2211df2d7537cf7ad2a7f8008c570df6859f15d14938976aa79fe1be5f\": rpc error: code = NotFound desc = could not find container \"c9de7c2211df2d7537cf7ad2a7f8008c570df6859f15d14938976aa79fe1be5f\": container with ID starting with c9de7c2211df2d7537cf7ad2a7f8008c570df6859f15d14938976aa79fe1be5f not found: ID does not exist"
Jan 29 07:28:57 crc kubenswrapper[4861]: I0129 07:28:57.030942 4861 scope.go:117] "RemoveContainer" containerID="cd02ad086546cafa2f9afb5bd7b721f3622fbba7cd83b01d55021d725bfb1478"
Jan 29 07:28:57 crc kubenswrapper[4861]: E0129 07:28:57.031620 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd02ad086546cafa2f9afb5bd7b721f3622fbba7cd83b01d55021d725bfb1478\": container with ID starting with cd02ad086546cafa2f9afb5bd7b721f3622fbba7cd83b01d55021d725bfb1478 not found: ID does not exist" containerID="cd02ad086546cafa2f9afb5bd7b721f3622fbba7cd83b01d55021d725bfb1478"
Jan 29 07:28:57 crc kubenswrapper[4861]: I0129 07:28:57.031762 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd02ad086546cafa2f9afb5bd7b721f3622fbba7cd83b01d55021d725bfb1478"} err="failed to get container status \"cd02ad086546cafa2f9afb5bd7b721f3622fbba7cd83b01d55021d725bfb1478\": rpc error: code = NotFound desc = could not find container \"cd02ad086546cafa2f9afb5bd7b721f3622fbba7cd83b01d55021d725bfb1478\": container with ID starting with cd02ad086546cafa2f9afb5bd7b721f3622fbba7cd83b01d55021d725bfb1478 not found: ID does not exist"
Jan 29 07:28:57 crc kubenswrapper[4861]: I0129 07:28:57.128177 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" path="/var/lib/kubelet/pods/6b977256-8e4b-4779-9c1e-cb0fc1f464e1/volumes"
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:29:14 crc kubenswrapper[4861]: I0129 07:29:14.116750 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19" Jan 29 07:29:15 crc kubenswrapper[4861]: I0129 07:29:15.130260 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"643eb0a74449c49d44ed13cfe5e1509e3b0f25c9d842b22e0e96f4da6185efa3"} Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.147977 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m"] Jan 29 07:30:00 crc kubenswrapper[4861]: E0129 07:30:00.149759 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" containerName="registry-server" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.149801 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" containerName="registry-server" Jan 29 07:30:00 crc kubenswrapper[4861]: E0129 07:30:00.149862 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" containerName="extract-utilities" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.149876 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" containerName="extract-utilities" Jan 29 07:30:00 crc kubenswrapper[4861]: E0129 07:30:00.149891 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" containerName="extract-content" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.149907 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" containerName="extract-content" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.150193 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b977256-8e4b-4779-9c1e-cb0fc1f464e1" containerName="registry-server" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.151001 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.154175 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.155467 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.165996 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m"] Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.270458 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ba09b3c-c560-4a97-904f-3691faea50ef-secret-volume\") pod \"collect-profiles-29494530-jdd2m\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.270532 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dssng\" (UniqueName: \"kubernetes.io/projected/3ba09b3c-c560-4a97-904f-3691faea50ef-kube-api-access-dssng\") pod \"collect-profiles-29494530-jdd2m\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.270709 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ba09b3c-c560-4a97-904f-3691faea50ef-config-volume\") pod \"collect-profiles-29494530-jdd2m\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.371517 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ba09b3c-c560-4a97-904f-3691faea50ef-config-volume\") pod \"collect-profiles-29494530-jdd2m\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.371607 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ba09b3c-c560-4a97-904f-3691faea50ef-secret-volume\") pod \"collect-profiles-29494530-jdd2m\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.371645 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dssng\" (UniqueName: \"kubernetes.io/projected/3ba09b3c-c560-4a97-904f-3691faea50ef-kube-api-access-dssng\") pod \"collect-profiles-29494530-jdd2m\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.372385 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ba09b3c-c560-4a97-904f-3691faea50ef-config-volume\") pod 
\"collect-profiles-29494530-jdd2m\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.383181 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ba09b3c-c560-4a97-904f-3691faea50ef-secret-volume\") pod \"collect-profiles-29494530-jdd2m\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.391626 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dssng\" (UniqueName: \"kubernetes.io/projected/3ba09b3c-c560-4a97-904f-3691faea50ef-kube-api-access-dssng\") pod \"collect-profiles-29494530-jdd2m\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.507310 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:00 crc kubenswrapper[4861]: I0129 07:30:00.913394 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m"] Jan 29 07:30:01 crc kubenswrapper[4861]: I0129 07:30:01.500128 4861 generic.go:334] "Generic (PLEG): container finished" podID="3ba09b3c-c560-4a97-904f-3691faea50ef" containerID="b3fd5f59f1baea8946161da2adadd03b06a4fef3c6184f206e96569a0f8420dc" exitCode=0 Jan 29 07:30:01 crc kubenswrapper[4861]: I0129 07:30:01.500200 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" event={"ID":"3ba09b3c-c560-4a97-904f-3691faea50ef","Type":"ContainerDied","Data":"b3fd5f59f1baea8946161da2adadd03b06a4fef3c6184f206e96569a0f8420dc"} Jan 29 07:30:01 crc kubenswrapper[4861]: I0129 07:30:01.500444 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" event={"ID":"3ba09b3c-c560-4a97-904f-3691faea50ef","Type":"ContainerStarted","Data":"766344fec664f0cac5494245518baf23216bc9a5e2947b00c0e90f318e0fd4e9"} Jan 29 07:30:02 crc kubenswrapper[4861]: I0129 07:30:02.847354 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:02 crc kubenswrapper[4861]: I0129 07:30:02.908778 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ba09b3c-c560-4a97-904f-3691faea50ef-secret-volume\") pod \"3ba09b3c-c560-4a97-904f-3691faea50ef\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " Jan 29 07:30:02 crc kubenswrapper[4861]: I0129 07:30:02.908839 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ba09b3c-c560-4a97-904f-3691faea50ef-config-volume\") pod \"3ba09b3c-c560-4a97-904f-3691faea50ef\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " Jan 29 07:30:02 crc kubenswrapper[4861]: I0129 07:30:02.908877 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dssng\" (UniqueName: \"kubernetes.io/projected/3ba09b3c-c560-4a97-904f-3691faea50ef-kube-api-access-dssng\") pod \"3ba09b3c-c560-4a97-904f-3691faea50ef\" (UID: \"3ba09b3c-c560-4a97-904f-3691faea50ef\") " Jan 29 07:30:02 crc kubenswrapper[4861]: I0129 07:30:02.909598 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ba09b3c-c560-4a97-904f-3691faea50ef-config-volume" (OuterVolumeSpecName: "config-volume") pod "3ba09b3c-c560-4a97-904f-3691faea50ef" (UID: "3ba09b3c-c560-4a97-904f-3691faea50ef"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 07:30:02 crc kubenswrapper[4861]: I0129 07:30:02.914849 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ba09b3c-c560-4a97-904f-3691faea50ef-kube-api-access-dssng" (OuterVolumeSpecName: "kube-api-access-dssng") pod "3ba09b3c-c560-4a97-904f-3691faea50ef" (UID: "3ba09b3c-c560-4a97-904f-3691faea50ef"). InnerVolumeSpecName "kube-api-access-dssng". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:30:02 crc kubenswrapper[4861]: I0129 07:30:02.924286 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ba09b3c-c560-4a97-904f-3691faea50ef-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3ba09b3c-c560-4a97-904f-3691faea50ef" (UID: "3ba09b3c-c560-4a97-904f-3691faea50ef"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 07:30:03 crc kubenswrapper[4861]: I0129 07:30:03.010215 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dssng\" (UniqueName: \"kubernetes.io/projected/3ba09b3c-c560-4a97-904f-3691faea50ef-kube-api-access-dssng\") on node \"crc\" DevicePath \"\"" Jan 29 07:30:03 crc kubenswrapper[4861]: I0129 07:30:03.010253 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ba09b3c-c560-4a97-904f-3691faea50ef-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 07:30:03 crc kubenswrapper[4861]: I0129 07:30:03.010264 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ba09b3c-c560-4a97-904f-3691faea50ef-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 07:30:03 crc kubenswrapper[4861]: I0129 07:30:03.520809 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" event={"ID":"3ba09b3c-c560-4a97-904f-3691faea50ef","Type":"ContainerDied","Data":"766344fec664f0cac5494245518baf23216bc9a5e2947b00c0e90f318e0fd4e9"} Jan 29 07:30:03 crc kubenswrapper[4861]: I0129 07:30:03.520866 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="766344fec664f0cac5494245518baf23216bc9a5e2947b00c0e90f318e0fd4e9" Jan 29 07:30:03 crc kubenswrapper[4861]: I0129 07:30:03.520870 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m" Jan 29 07:30:03 crc kubenswrapper[4861]: I0129 07:30:03.939806 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"] Jan 29 07:30:03 crc kubenswrapper[4861]: I0129 07:30:03.947348 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494485-42mq5"] Jan 29 07:30:05 crc kubenswrapper[4861]: I0129 07:30:05.133810 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="718f749b-e564-48bf-8df8-8d3070a0fb53" path="/var/lib/kubelet/pods/718f749b-e564-48bf-8df8-8d3070a0fb53/volumes" Jan 29 07:30:24 crc kubenswrapper[4861]: I0129 07:30:24.441802 4861 scope.go:117] "RemoveContainer" containerID="54dc8617377b933b87ae72bc3453242a94358ec959f01d1e1ce0d668620102bb" Jan 29 07:31:30 crc kubenswrapper[4861]: I0129 07:31:30.630610 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:31:30 crc kubenswrapper[4861]: I0129 07:31:30.631501 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:32:00 crc kubenswrapper[4861]: I0129 07:32:00.629599 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
Jan 29 07:31:30 crc kubenswrapper[4861]: I0129 07:31:30.630610 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:31:30 crc kubenswrapper[4861]: I0129 07:31:30.631501 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:32:00 crc kubenswrapper[4861]: I0129 07:32:00.629599 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:32:00 crc kubenswrapper[4861]: I0129 07:32:00.630365 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.550439 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rz296"]
Jan 29 07:32:29 crc kubenswrapper[4861]: E0129 07:32:29.551825 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ba09b3c-c560-4a97-904f-3691faea50ef" containerName="collect-profiles"
Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.551855 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ba09b3c-c560-4a97-904f-3691faea50ef" containerName="collect-profiles"
Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.553942 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ba09b3c-c560-4a97-904f-3691faea50ef" containerName="collect-profiles"
Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.556148 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rz296"
Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.567736 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rz296"]
Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.586705 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hp7gw\" (UniqueName: \"kubernetes.io/projected/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-kube-api-access-hp7gw\") pod \"redhat-marketplace-rz296\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " pod="openshift-marketplace/redhat-marketplace-rz296"
Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.586761 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-catalog-content\") pod \"redhat-marketplace-rz296\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " pod="openshift-marketplace/redhat-marketplace-rz296"
Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.586787 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-utilities\") pod \"redhat-marketplace-rz296\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " pod="openshift-marketplace/redhat-marketplace-rz296"
Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.688251 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hp7gw\" (UniqueName: \"kubernetes.io/projected/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-kube-api-access-hp7gw\") pod \"redhat-marketplace-rz296\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " pod="openshift-marketplace/redhat-marketplace-rz296"
Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.688305 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-catalog-content\") pod \"redhat-marketplace-rz296\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " pod="openshift-marketplace/redhat-marketplace-rz296"
pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.688334 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-utilities\") pod \"redhat-marketplace-rz296\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.688744 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-utilities\") pod \"redhat-marketplace-rz296\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.688851 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-catalog-content\") pod \"redhat-marketplace-rz296\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.712902 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hp7gw\" (UniqueName: \"kubernetes.io/projected/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-kube-api-access-hp7gw\") pod \"redhat-marketplace-rz296\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:29 crc kubenswrapper[4861]: I0129 07:32:29.884588 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:30 crc kubenswrapper[4861]: I0129 07:32:30.297186 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rz296"] Jan 29 07:32:30 crc kubenswrapper[4861]: W0129 07:32:30.308103 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4e8a2d2_60a5_4db9_a209_0aa3dd9b11d5.slice/crio-8ff3165e5980bd26d3544e0d8c142c12dd6209bcdef2586e6080c23aeb3fd422 WatchSource:0}: Error finding container 8ff3165e5980bd26d3544e0d8c142c12dd6209bcdef2586e6080c23aeb3fd422: Status 404 returned error can't find the container with id 8ff3165e5980bd26d3544e0d8c142c12dd6209bcdef2586e6080c23aeb3fd422 Jan 29 07:32:30 crc kubenswrapper[4861]: I0129 07:32:30.583272 4861 generic.go:334] "Generic (PLEG): container finished" podID="d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" containerID="008c5a62d1ba22dfe4a44a3e02cd6ef6445fee2cef0537869f3258005d02c72b" exitCode=0 Jan 29 07:32:30 crc kubenswrapper[4861]: I0129 07:32:30.583320 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rz296" event={"ID":"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5","Type":"ContainerDied","Data":"008c5a62d1ba22dfe4a44a3e02cd6ef6445fee2cef0537869f3258005d02c72b"} Jan 29 07:32:30 crc kubenswrapper[4861]: I0129 07:32:30.583349 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rz296" event={"ID":"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5","Type":"ContainerStarted","Data":"8ff3165e5980bd26d3544e0d8c142c12dd6209bcdef2586e6080c23aeb3fd422"} Jan 29 07:32:30 crc kubenswrapper[4861]: I0129 07:32:30.630124 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p 
Jan 29 07:32:30 crc kubenswrapper[4861]: I0129 07:32:30.630124 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:32:30 crc kubenswrapper[4861]: I0129 07:32:30.630196 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:32:30 crc kubenswrapper[4861]: I0129 07:32:30.630252 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 07:32:30 crc kubenswrapper[4861]: I0129 07:32:30.631010 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"643eb0a74449c49d44ed13cfe5e1509e3b0f25c9d842b22e0e96f4da6185efa3"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 07:32:30 crc kubenswrapper[4861]: I0129 07:32:30.631105 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://643eb0a74449c49d44ed13cfe5e1509e3b0f25c9d842b22e0e96f4da6185efa3" gracePeriod=600
Jan 29 07:32:31 crc kubenswrapper[4861]: I0129 07:32:31.593747 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="643eb0a74449c49d44ed13cfe5e1509e3b0f25c9d842b22e0e96f4da6185efa3" exitCode=0
Jan 29 07:32:31 crc kubenswrapper[4861]: I0129 07:32:31.593822 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"643eb0a74449c49d44ed13cfe5e1509e3b0f25c9d842b22e0e96f4da6185efa3"}
Jan 29 07:32:31 crc kubenswrapper[4861]: I0129 07:32:31.594417 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051"}
Jan 29 07:32:31 crc kubenswrapper[4861]: I0129 07:32:31.594444 4861 scope.go:117] "RemoveContainer" containerID="96e0c651dd334c3612b58b7897f78a3da2981b4cff3a0561a92faea8fa275b19"
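This is the full liveness cycle for machine-config-daemon: probe failures at 07:31:30, 07:32:00, and 07:32:30 (a 30s period; three consecutive failures matching the kubelet's default failureThreshold of 3), then the kill with the pod's 600s grace period and an immediate replacement (dbb3b9a0...), whose liveness probe is seen failing again at 07:34:30 below. A minimal HTTP prober in the same spirit (endpoint and intervals taken from the log; an illustration, not prober.go):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	const (
		url              = "http://127.0.0.1:8798/health" // probed endpoint from the log
		period           = 30 * time.Second               // interval between the logged failures
		failureThreshold = 3                               // assumed kubelet default
	)
	client := &http.Client{Timeout: time.Second}
	failures := 0
	for {
		resp, err := client.Get(url)
		healthy := err == nil && resp.StatusCode < 400
		if err == nil {
			resp.Body.Close()
		}
		if healthy {
			failures = 0 // a passing probe resets the counter
		} else {
			failures++
			fmt.Printf("Probe failed (%d/%d)\n", failures, failureThreshold)
			if failures >= failureThreshold {
				fmt.Println("failed liveness probe, will be restarted")
				return
			}
		}
		time.Sleep(period)
	}
}
```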
event={"ID":"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5","Type":"ContainerStarted","Data":"ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df"} Jan 29 07:32:32 crc kubenswrapper[4861]: I0129 07:32:32.641223 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rz296" podStartSLOduration=2.249565263 podStartE2EDuration="3.641200257s" podCreationTimestamp="2026-01-29 07:32:29 +0000 UTC" firstStartedPulling="2026-01-29 07:32:30.586495375 +0000 UTC m=+3442.257989932" lastFinishedPulling="2026-01-29 07:32:31.978130359 +0000 UTC m=+3443.649624926" observedRunningTime="2026-01-29 07:32:32.634548458 +0000 UTC m=+3444.306043035" watchObservedRunningTime="2026-01-29 07:32:32.641200257 +0000 UTC m=+3444.312694814" Jan 29 07:32:39 crc kubenswrapper[4861]: I0129 07:32:39.885578 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:39 crc kubenswrapper[4861]: I0129 07:32:39.886395 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:39 crc kubenswrapper[4861]: I0129 07:32:39.950390 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:40 crc kubenswrapper[4861]: I0129 07:32:40.741995 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:40 crc kubenswrapper[4861]: I0129 07:32:40.804235 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rz296"] Jan 29 07:32:42 crc kubenswrapper[4861]: I0129 07:32:42.701798 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rz296" podUID="d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" containerName="registry-server" containerID="cri-o://ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df" gracePeriod=2 Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.107525 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.207400 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hp7gw\" (UniqueName: \"kubernetes.io/projected/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-kube-api-access-hp7gw\") pod \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.207513 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-catalog-content\") pod \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.207604 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-utilities\") pod \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\" (UID: \"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5\") " Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.208666 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-utilities" (OuterVolumeSpecName: "utilities") pod "d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" (UID: "d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.219261 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-kube-api-access-hp7gw" (OuterVolumeSpecName: "kube-api-access-hp7gw") pod "d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" (UID: "d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5"). InnerVolumeSpecName "kube-api-access-hp7gw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.239148 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" (UID: "d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.309345 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hp7gw\" (UniqueName: \"kubernetes.io/projected/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-kube-api-access-hp7gw\") on node \"crc\" DevicePath \"\"" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.309398 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.309417 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.711042 4861 generic.go:334] "Generic (PLEG): container finished" podID="d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" containerID="ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df" exitCode=0 Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.711122 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rz296" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.711107 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rz296" event={"ID":"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5","Type":"ContainerDied","Data":"ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df"} Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.711292 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rz296" event={"ID":"d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5","Type":"ContainerDied","Data":"8ff3165e5980bd26d3544e0d8c142c12dd6209bcdef2586e6080c23aeb3fd422"} Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.711322 4861 scope.go:117] "RemoveContainer" containerID="ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.749624 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rz296"] Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.767874 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rz296"] Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.768858 4861 scope.go:117] "RemoveContainer" containerID="614756a8ee607ed8051579eaa38e650d400a39b8c71af72c0209ef3089289625" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.788017 4861 scope.go:117] "RemoveContainer" containerID="008c5a62d1ba22dfe4a44a3e02cd6ef6445fee2cef0537869f3258005d02c72b" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.806876 4861 scope.go:117] "RemoveContainer" containerID="ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df" Jan 29 07:32:43 crc kubenswrapper[4861]: E0129 07:32:43.807319 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df\": container with ID starting with ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df not found: ID does not exist" containerID="ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df" Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.807391 4861 
Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.807391 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df"} err="failed to get container status \"ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df\": rpc error: code = NotFound desc = could not find container \"ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df\": container with ID starting with ee9c4d75ea4967d49e936715b042624ac7393387cd111aab7e65995e60fa00df not found: ID does not exist"
Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.807430 4861 scope.go:117] "RemoveContainer" containerID="614756a8ee607ed8051579eaa38e650d400a39b8c71af72c0209ef3089289625"
Jan 29 07:32:43 crc kubenswrapper[4861]: E0129 07:32:43.807798 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"614756a8ee607ed8051579eaa38e650d400a39b8c71af72c0209ef3089289625\": container with ID starting with 614756a8ee607ed8051579eaa38e650d400a39b8c71af72c0209ef3089289625 not found: ID does not exist" containerID="614756a8ee607ed8051579eaa38e650d400a39b8c71af72c0209ef3089289625"
Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.807828 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"614756a8ee607ed8051579eaa38e650d400a39b8c71af72c0209ef3089289625"} err="failed to get container status \"614756a8ee607ed8051579eaa38e650d400a39b8c71af72c0209ef3089289625\": rpc error: code = NotFound desc = could not find container \"614756a8ee607ed8051579eaa38e650d400a39b8c71af72c0209ef3089289625\": container with ID starting with 614756a8ee607ed8051579eaa38e650d400a39b8c71af72c0209ef3089289625 not found: ID does not exist"
Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.807844 4861 scope.go:117] "RemoveContainer" containerID="008c5a62d1ba22dfe4a44a3e02cd6ef6445fee2cef0537869f3258005d02c72b"
Jan 29 07:32:43 crc kubenswrapper[4861]: E0129 07:32:43.808125 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"008c5a62d1ba22dfe4a44a3e02cd6ef6445fee2cef0537869f3258005d02c72b\": container with ID starting with 008c5a62d1ba22dfe4a44a3e02cd6ef6445fee2cef0537869f3258005d02c72b not found: ID does not exist" containerID="008c5a62d1ba22dfe4a44a3e02cd6ef6445fee2cef0537869f3258005d02c72b"
Jan 29 07:32:43 crc kubenswrapper[4861]: I0129 07:32:43.808186 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"008c5a62d1ba22dfe4a44a3e02cd6ef6445fee2cef0537869f3258005d02c72b"} err="failed to get container status \"008c5a62d1ba22dfe4a44a3e02cd6ef6445fee2cef0537869f3258005d02c72b\": rpc error: code = NotFound desc = could not find container \"008c5a62d1ba22dfe4a44a3e02cd6ef6445fee2cef0537869f3258005d02c72b\": container with ID starting with 008c5a62d1ba22dfe4a44a3e02cd6ef6445fee2cef0537869f3258005d02c72b not found: ID does not exist"
Jan 29 07:32:45 crc kubenswrapper[4861]: I0129 07:32:45.127039 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" path="/var/lib/kubelet/pods/d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5/volumes"
podUID="d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" containerName="extract-content" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.523610 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" containerName="extract-content" Jan 29 07:34:28 crc kubenswrapper[4861]: E0129 07:34:28.523650 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" containerName="extract-utilities" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.523670 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" containerName="extract-utilities" Jan 29 07:34:28 crc kubenswrapper[4861]: E0129 07:34:28.523743 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" containerName="registry-server" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.523766 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" containerName="registry-server" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.524067 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4e8a2d2-60a5-4db9-a209-0aa3dd9b11d5" containerName="registry-server" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.526029 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.538767 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-44v9r"] Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.658725 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hj8kb\" (UniqueName: \"kubernetes.io/projected/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-kube-api-access-hj8kb\") pod \"redhat-operators-44v9r\" (UID: \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.658972 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-utilities\") pod \"redhat-operators-44v9r\" (UID: \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.659233 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-catalog-content\") pod \"redhat-operators-44v9r\" (UID: \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.760297 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hj8kb\" (UniqueName: \"kubernetes.io/projected/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-kube-api-access-hj8kb\") pod \"redhat-operators-44v9r\" (UID: \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.760433 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-utilities\") pod \"redhat-operators-44v9r\" (UID: 
\"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.760497 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-catalog-content\") pod \"redhat-operators-44v9r\" (UID: \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.760954 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-utilities\") pod \"redhat-operators-44v9r\" (UID: \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.761046 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-catalog-content\") pod \"redhat-operators-44v9r\" (UID: \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.795721 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hj8kb\" (UniqueName: \"kubernetes.io/projected/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-kube-api-access-hj8kb\") pod \"redhat-operators-44v9r\" (UID: \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:28 crc kubenswrapper[4861]: I0129 07:34:28.852459 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:29 crc kubenswrapper[4861]: I0129 07:34:29.335670 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-44v9r"] Jan 29 07:34:29 crc kubenswrapper[4861]: W0129 07:34:29.346224 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24ed177e_32e8_45e0_a2b1_2efc0a2f9b43.slice/crio-ac07ee429283949df3dbc061c2e1432c23fd5f4120a6e2288cf04393153d8262 WatchSource:0}: Error finding container ac07ee429283949df3dbc061c2e1432c23fd5f4120a6e2288cf04393153d8262: Status 404 returned error can't find the container with id ac07ee429283949df3dbc061c2e1432c23fd5f4120a6e2288cf04393153d8262 Jan 29 07:34:29 crc kubenswrapper[4861]: I0129 07:34:29.622686 4861 generic.go:334] "Generic (PLEG): container finished" podID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerID="1187e6ae6e0dea5804e750e8263f5dc169a2578c6233371fc45d3462b4ca5da7" exitCode=0 Jan 29 07:34:29 crc kubenswrapper[4861]: I0129 07:34:29.622750 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44v9r" event={"ID":"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43","Type":"ContainerDied","Data":"1187e6ae6e0dea5804e750e8263f5dc169a2578c6233371fc45d3462b4ca5da7"} Jan 29 07:34:29 crc kubenswrapper[4861]: I0129 07:34:29.623060 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44v9r" event={"ID":"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43","Type":"ContainerStarted","Data":"ac07ee429283949df3dbc061c2e1432c23fd5f4120a6e2288cf04393153d8262"} Jan 29 07:34:29 crc kubenswrapper[4861]: I0129 07:34:29.624160 4861 provider.go:102] Refreshing cache for provider: 
*credentialprovider.defaultDockerConfigProvider Jan 29 07:34:30 crc kubenswrapper[4861]: I0129 07:34:30.629477 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:34:30 crc kubenswrapper[4861]: I0129 07:34:30.629777 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:34:30 crc kubenswrapper[4861]: I0129 07:34:30.631140 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44v9r" event={"ID":"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43","Type":"ContainerStarted","Data":"1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0"} Jan 29 07:34:31 crc kubenswrapper[4861]: I0129 07:34:31.642435 4861 generic.go:334] "Generic (PLEG): container finished" podID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerID="1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0" exitCode=0 Jan 29 07:34:31 crc kubenswrapper[4861]: I0129 07:34:31.642495 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44v9r" event={"ID":"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43","Type":"ContainerDied","Data":"1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0"} Jan 29 07:34:32 crc kubenswrapper[4861]: I0129 07:34:32.653873 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44v9r" event={"ID":"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43","Type":"ContainerStarted","Data":"ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9"} Jan 29 07:34:32 crc kubenswrapper[4861]: I0129 07:34:32.690468 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-44v9r" podStartSLOduration=2.25759651 podStartE2EDuration="4.690432446s" podCreationTimestamp="2026-01-29 07:34:28 +0000 UTC" firstStartedPulling="2026-01-29 07:34:29.623923936 +0000 UTC m=+3561.295418493" lastFinishedPulling="2026-01-29 07:34:32.056759852 +0000 UTC m=+3563.728254429" observedRunningTime="2026-01-29 07:34:32.681437366 +0000 UTC m=+3564.352931943" watchObservedRunningTime="2026-01-29 07:34:32.690432446 +0000 UTC m=+3564.361927043" Jan 29 07:34:38 crc kubenswrapper[4861]: I0129 07:34:38.853480 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:38 crc kubenswrapper[4861]: I0129 07:34:38.854372 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:39 crc kubenswrapper[4861]: I0129 07:34:39.924791 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-44v9r" podUID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerName="registry-server" probeResult="failure" output=< Jan 29 07:34:39 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 07:34:39 crc kubenswrapper[4861]: > Jan 29 07:34:48 crc kubenswrapper[4861]: I0129 07:34:48.917490 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:48 crc kubenswrapper[4861]: I0129 07:34:48.996700 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:49 crc kubenswrapper[4861]: I0129 07:34:49.164539 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-44v9r"] Jan 29 07:34:50 crc kubenswrapper[4861]: I0129 07:34:50.795671 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-44v9r" podUID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerName="registry-server" containerID="cri-o://ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9" gracePeriod=2 Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.275342 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.412170 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-utilities\") pod \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\" (UID: \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.412281 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-catalog-content\") pod \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\" (UID: \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.412327 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hj8kb\" (UniqueName: \"kubernetes.io/projected/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-kube-api-access-hj8kb\") pod \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\" (UID: \"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43\") " Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.413387 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-utilities" (OuterVolumeSpecName: "utilities") pod "24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" (UID: "24ed177e-32e8-45e0-a2b1-2efc0a2f9b43"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.420512 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-kube-api-access-hj8kb" (OuterVolumeSpecName: "kube-api-access-hj8kb") pod "24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" (UID: "24ed177e-32e8-45e0-a2b1-2efc0a2f9b43"). InnerVolumeSpecName "kube-api-access-hj8kb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.513626 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.513655 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hj8kb\" (UniqueName: \"kubernetes.io/projected/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-kube-api-access-hj8kb\") on node \"crc\" DevicePath \"\"" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.572293 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" (UID: "24ed177e-32e8-45e0-a2b1-2efc0a2f9b43"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.615329 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.812129 4861 generic.go:334] "Generic (PLEG): container finished" podID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerID="ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9" exitCode=0 Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.812215 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44v9r" event={"ID":"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43","Type":"ContainerDied","Data":"ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9"} Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.812277 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44v9r" event={"ID":"24ed177e-32e8-45e0-a2b1-2efc0a2f9b43","Type":"ContainerDied","Data":"ac07ee429283949df3dbc061c2e1432c23fd5f4120a6e2288cf04393153d8262"} Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.812308 4861 scope.go:117] "RemoveContainer" containerID="ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.812519 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-44v9r" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.844273 4861 scope.go:117] "RemoveContainer" containerID="1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.865386 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-44v9r"] Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.871490 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-44v9r"] Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.901340 4861 scope.go:117] "RemoveContainer" containerID="1187e6ae6e0dea5804e750e8263f5dc169a2578c6233371fc45d3462b4ca5da7" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.926118 4861 scope.go:117] "RemoveContainer" containerID="ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9" Jan 29 07:34:51 crc kubenswrapper[4861]: E0129 07:34:51.926697 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9\": container with ID starting with ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9 not found: ID does not exist" containerID="ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.926750 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9"} err="failed to get container status \"ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9\": rpc error: code = NotFound desc = could not find container \"ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9\": container with ID starting with ba86cde29a503c5f3f4381548c4509043742a9c05351758274c28df75681c6f9 not found: ID does not exist" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.926780 4861 scope.go:117] "RemoveContainer" containerID="1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0" Jan 29 07:34:51 crc kubenswrapper[4861]: E0129 07:34:51.927160 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0\": container with ID starting with 1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0 not found: ID does not exist" containerID="1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.927219 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0"} err="failed to get container status \"1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0\": rpc error: code = NotFound desc = could not find container \"1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0\": container with ID starting with 1f5da7902ec11e8e6921a4897cd1163039ed88f537c6a17ba8c1e8ab8f31baf0 not found: ID does not exist" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.927255 4861 scope.go:117] "RemoveContainer" containerID="1187e6ae6e0dea5804e750e8263f5dc169a2578c6233371fc45d3462b4ca5da7" Jan 29 07:34:51 crc kubenswrapper[4861]: E0129 07:34:51.927854 4861 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"1187e6ae6e0dea5804e750e8263f5dc169a2578c6233371fc45d3462b4ca5da7\": container with ID starting with 1187e6ae6e0dea5804e750e8263f5dc169a2578c6233371fc45d3462b4ca5da7 not found: ID does not exist" containerID="1187e6ae6e0dea5804e750e8263f5dc169a2578c6233371fc45d3462b4ca5da7" Jan 29 07:34:51 crc kubenswrapper[4861]: I0129 07:34:51.927897 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1187e6ae6e0dea5804e750e8263f5dc169a2578c6233371fc45d3462b4ca5da7"} err="failed to get container status \"1187e6ae6e0dea5804e750e8263f5dc169a2578c6233371fc45d3462b4ca5da7\": rpc error: code = NotFound desc = could not find container \"1187e6ae6e0dea5804e750e8263f5dc169a2578c6233371fc45d3462b4ca5da7\": container with ID starting with 1187e6ae6e0dea5804e750e8263f5dc169a2578c6233371fc45d3462b4ca5da7 not found: ID does not exist" Jan 29 07:34:53 crc kubenswrapper[4861]: I0129 07:34:53.131722 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" path="/var/lib/kubelet/pods/24ed177e-32e8-45e0-a2b1-2efc0a2f9b43/volumes" Jan 29 07:35:00 crc kubenswrapper[4861]: I0129 07:35:00.629695 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:35:00 crc kubenswrapper[4861]: I0129 07:35:00.630237 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:35:30 crc kubenswrapper[4861]: I0129 07:35:30.629986 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:35:30 crc kubenswrapper[4861]: I0129 07:35:30.630567 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:35:30 crc kubenswrapper[4861]: I0129 07:35:30.630613 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 07:35:30 crc kubenswrapper[4861]: I0129 07:35:30.631254 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 07:35:30 crc kubenswrapper[4861]: I0129 07:35:30.631310 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" 
podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" gracePeriod=600 Jan 29 07:35:30 crc kubenswrapper[4861]: E0129 07:35:30.765376 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:35:31 crc kubenswrapper[4861]: I0129 07:35:31.127651 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" exitCode=0 Jan 29 07:35:31 crc kubenswrapper[4861]: I0129 07:35:31.133176 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051"} Jan 29 07:35:31 crc kubenswrapper[4861]: I0129 07:35:31.133238 4861 scope.go:117] "RemoveContainer" containerID="643eb0a74449c49d44ed13cfe5e1509e3b0f25c9d842b22e0e96f4da6185efa3" Jan 29 07:35:31 crc kubenswrapper[4861]: I0129 07:35:31.134931 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:35:31 crc kubenswrapper[4861]: E0129 07:35:31.135418 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:35:44 crc kubenswrapper[4861]: I0129 07:35:44.116434 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:35:44 crc kubenswrapper[4861]: E0129 07:35:44.117267 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:35:55 crc kubenswrapper[4861]: I0129 07:35:55.117291 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:35:55 crc kubenswrapper[4861]: E0129 07:35:55.118182 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:36:06 crc kubenswrapper[4861]: I0129 07:36:06.116526 4861 scope.go:117] 
"RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:36:06 crc kubenswrapper[4861]: E0129 07:36:06.117574 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:36:18 crc kubenswrapper[4861]: I0129 07:36:18.116934 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:36:18 crc kubenswrapper[4861]: E0129 07:36:18.118121 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:36:33 crc kubenswrapper[4861]: I0129 07:36:33.132138 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:36:33 crc kubenswrapper[4861]: E0129 07:36:33.133630 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:36:48 crc kubenswrapper[4861]: I0129 07:36:48.116303 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:36:48 crc kubenswrapper[4861]: E0129 07:36:48.117698 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:37:01 crc kubenswrapper[4861]: I0129 07:37:01.116994 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:37:01 crc kubenswrapper[4861]: E0129 07:37:01.118015 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:37:15 crc kubenswrapper[4861]: I0129 07:37:15.333956 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:37:15 crc kubenswrapper[4861]: E0129 07:37:15.337961 4861 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:37:30 crc kubenswrapper[4861]: I0129 07:37:30.116640 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:37:30 crc kubenswrapper[4861]: E0129 07:37:30.117700 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:37:44 crc kubenswrapper[4861]: I0129 07:37:44.117333 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:37:44 crc kubenswrapper[4861]: E0129 07:37:44.118590 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:37:58 crc kubenswrapper[4861]: I0129 07:37:58.116567 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:37:58 crc kubenswrapper[4861]: E0129 07:37:58.117447 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:38:09 crc kubenswrapper[4861]: I0129 07:38:09.120576 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:38:09 crc kubenswrapper[4861]: E0129 07:38:09.121114 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.095562 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4wkh7"] Jan 29 07:38:12 crc kubenswrapper[4861]: E0129 07:38:12.096983 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerName="extract-utilities" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.097530 4861 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerName="extract-utilities" Jan 29 07:38:12 crc kubenswrapper[4861]: E0129 07:38:12.097672 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerName="extract-content" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.097755 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerName="extract-content" Jan 29 07:38:12 crc kubenswrapper[4861]: E0129 07:38:12.097852 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerName="registry-server" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.097939 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerName="registry-server" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.098240 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="24ed177e-32e8-45e0-a2b1-2efc0a2f9b43" containerName="registry-server" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.099839 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.124703 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4wkh7"] Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.142831 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-catalog-content\") pod \"certified-operators-4wkh7\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.142933 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-utilities\") pod \"certified-operators-4wkh7\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.143005 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r9r2\" (UniqueName: \"kubernetes.io/projected/12d5e7cd-4284-4809-838f-4db140a06cf3-kube-api-access-9r9r2\") pod \"certified-operators-4wkh7\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.244855 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-catalog-content\") pod \"certified-operators-4wkh7\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.244980 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-utilities\") pod \"certified-operators-4wkh7\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 
07:38:12.245055 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r9r2\" (UniqueName: \"kubernetes.io/projected/12d5e7cd-4284-4809-838f-4db140a06cf3-kube-api-access-9r9r2\") pod \"certified-operators-4wkh7\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.245361 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-catalog-content\") pod \"certified-operators-4wkh7\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.245616 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-utilities\") pod \"certified-operators-4wkh7\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.269380 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r9r2\" (UniqueName: \"kubernetes.io/projected/12d5e7cd-4284-4809-838f-4db140a06cf3-kube-api-access-9r9r2\") pod \"certified-operators-4wkh7\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.477824 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.728124 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4wkh7"] Jan 29 07:38:12 crc kubenswrapper[4861]: I0129 07:38:12.825310 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wkh7" event={"ID":"12d5e7cd-4284-4809-838f-4db140a06cf3","Type":"ContainerStarted","Data":"621b5baf9ab6ad6f822c1fbc6c63d579399a18c495da3a33d6c107d7ea1f3f40"} Jan 29 07:38:13 crc kubenswrapper[4861]: I0129 07:38:13.833780 4861 generic.go:334] "Generic (PLEG): container finished" podID="12d5e7cd-4284-4809-838f-4db140a06cf3" containerID="2f5a53dd4eceee930d3ca2be268f24a26dc6a17a8e9915f48fb8e1d265cd7a3c" exitCode=0 Jan 29 07:38:13 crc kubenswrapper[4861]: I0129 07:38:13.834206 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wkh7" event={"ID":"12d5e7cd-4284-4809-838f-4db140a06cf3","Type":"ContainerDied","Data":"2f5a53dd4eceee930d3ca2be268f24a26dc6a17a8e9915f48fb8e1d265cd7a3c"} Jan 29 07:38:14 crc kubenswrapper[4861]: I0129 07:38:14.842775 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wkh7" event={"ID":"12d5e7cd-4284-4809-838f-4db140a06cf3","Type":"ContainerStarted","Data":"549dc79b566af160556253b88ca23afb31caeae3888e02e4f5208bd200812e1e"} Jan 29 07:38:15 crc kubenswrapper[4861]: I0129 07:38:15.848720 4861 generic.go:334] "Generic (PLEG): container finished" podID="12d5e7cd-4284-4809-838f-4db140a06cf3" containerID="549dc79b566af160556253b88ca23afb31caeae3888e02e4f5208bd200812e1e" exitCode=0 Jan 29 07:38:15 crc kubenswrapper[4861]: I0129 07:38:15.848763 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wkh7" 
event={"ID":"12d5e7cd-4284-4809-838f-4db140a06cf3","Type":"ContainerDied","Data":"549dc79b566af160556253b88ca23afb31caeae3888e02e4f5208bd200812e1e"} Jan 29 07:38:16 crc kubenswrapper[4861]: I0129 07:38:16.857261 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wkh7" event={"ID":"12d5e7cd-4284-4809-838f-4db140a06cf3","Type":"ContainerStarted","Data":"9b26042343bedba9bb71885e21438ff01bffc3be3a0c9f9d1e8fa557a741ad4a"} Jan 29 07:38:16 crc kubenswrapper[4861]: I0129 07:38:16.881669 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4wkh7" podStartSLOduration=2.482386932 podStartE2EDuration="4.881646795s" podCreationTimestamp="2026-01-29 07:38:12 +0000 UTC" firstStartedPulling="2026-01-29 07:38:13.837261069 +0000 UTC m=+3785.508755626" lastFinishedPulling="2026-01-29 07:38:16.236520932 +0000 UTC m=+3787.908015489" observedRunningTime="2026-01-29 07:38:16.875896202 +0000 UTC m=+3788.547390769" watchObservedRunningTime="2026-01-29 07:38:16.881646795 +0000 UTC m=+3788.553141372" Jan 29 07:38:22 crc kubenswrapper[4861]: I0129 07:38:22.478151 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:22 crc kubenswrapper[4861]: I0129 07:38:22.479021 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:22 crc kubenswrapper[4861]: I0129 07:38:22.532153 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:22 crc kubenswrapper[4861]: I0129 07:38:22.977128 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:23 crc kubenswrapper[4861]: I0129 07:38:23.044901 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4wkh7"] Jan 29 07:38:24 crc kubenswrapper[4861]: I0129 07:38:24.116989 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:38:24 crc kubenswrapper[4861]: E0129 07:38:24.118051 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:38:24 crc kubenswrapper[4861]: I0129 07:38:24.919151 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4wkh7" podUID="12d5e7cd-4284-4809-838f-4db140a06cf3" containerName="registry-server" containerID="cri-o://9b26042343bedba9bb71885e21438ff01bffc3be3a0c9f9d1e8fa557a741ad4a" gracePeriod=2 Jan 29 07:38:25 crc kubenswrapper[4861]: I0129 07:38:25.931484 4861 generic.go:334] "Generic (PLEG): container finished" podID="12d5e7cd-4284-4809-838f-4db140a06cf3" containerID="9b26042343bedba9bb71885e21438ff01bffc3be3a0c9f9d1e8fa557a741ad4a" exitCode=0 Jan 29 07:38:25 crc kubenswrapper[4861]: I0129 07:38:25.931537 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wkh7" 
event={"ID":"12d5e7cd-4284-4809-838f-4db140a06cf3","Type":"ContainerDied","Data":"9b26042343bedba9bb71885e21438ff01bffc3be3a0c9f9d1e8fa557a741ad4a"} Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.453876 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.559052 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-utilities\") pod \"12d5e7cd-4284-4809-838f-4db140a06cf3\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.559192 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9r9r2\" (UniqueName: \"kubernetes.io/projected/12d5e7cd-4284-4809-838f-4db140a06cf3-kube-api-access-9r9r2\") pod \"12d5e7cd-4284-4809-838f-4db140a06cf3\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.559261 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-catalog-content\") pod \"12d5e7cd-4284-4809-838f-4db140a06cf3\" (UID: \"12d5e7cd-4284-4809-838f-4db140a06cf3\") " Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.560164 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-utilities" (OuterVolumeSpecName: "utilities") pod "12d5e7cd-4284-4809-838f-4db140a06cf3" (UID: "12d5e7cd-4284-4809-838f-4db140a06cf3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.566008 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12d5e7cd-4284-4809-838f-4db140a06cf3-kube-api-access-9r9r2" (OuterVolumeSpecName: "kube-api-access-9r9r2") pod "12d5e7cd-4284-4809-838f-4db140a06cf3" (UID: "12d5e7cd-4284-4809-838f-4db140a06cf3"). InnerVolumeSpecName "kube-api-access-9r9r2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.620269 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12d5e7cd-4284-4809-838f-4db140a06cf3" (UID: "12d5e7cd-4284-4809-838f-4db140a06cf3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.660820 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.660868 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12d5e7cd-4284-4809-838f-4db140a06cf3-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.660921 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9r9r2\" (UniqueName: \"kubernetes.io/projected/12d5e7cd-4284-4809-838f-4db140a06cf3-kube-api-access-9r9r2\") on node \"crc\" DevicePath \"\"" Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.940784 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4wkh7" event={"ID":"12d5e7cd-4284-4809-838f-4db140a06cf3","Type":"ContainerDied","Data":"621b5baf9ab6ad6f822c1fbc6c63d579399a18c495da3a33d6c107d7ea1f3f40"} Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.940839 4861 scope.go:117] "RemoveContainer" containerID="9b26042343bedba9bb71885e21438ff01bffc3be3a0c9f9d1e8fa557a741ad4a" Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.940971 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4wkh7" Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.960628 4861 scope.go:117] "RemoveContainer" containerID="549dc79b566af160556253b88ca23afb31caeae3888e02e4f5208bd200812e1e" Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.991960 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4wkh7"] Jan 29 07:38:26 crc kubenswrapper[4861]: I0129 07:38:26.997601 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4wkh7"] Jan 29 07:38:27 crc kubenswrapper[4861]: I0129 07:38:27.004653 4861 scope.go:117] "RemoveContainer" containerID="2f5a53dd4eceee930d3ca2be268f24a26dc6a17a8e9915f48fb8e1d265cd7a3c" Jan 29 07:38:27 crc kubenswrapper[4861]: I0129 07:38:27.132055 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12d5e7cd-4284-4809-838f-4db140a06cf3" path="/var/lib/kubelet/pods/12d5e7cd-4284-4809-838f-4db140a06cf3/volumes" Jan 29 07:38:37 crc kubenswrapper[4861]: I0129 07:38:37.116720 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:38:37 crc kubenswrapper[4861]: E0129 07:38:37.117857 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:38:48 crc kubenswrapper[4861]: I0129 07:38:48.117435 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:38:48 crc kubenswrapper[4861]: E0129 07:38:48.118430 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:39:02 crc kubenswrapper[4861]: I0129 07:39:02.117017 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:39:02 crc kubenswrapper[4861]: E0129 07:39:02.117987 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:39:17 crc kubenswrapper[4861]: I0129 07:39:17.116878 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:39:17 crc kubenswrapper[4861]: E0129 07:39:17.118019 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:39:32 crc kubenswrapper[4861]: I0129 07:39:32.117445 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:39:32 crc kubenswrapper[4861]: E0129 07:39:32.118857 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:39:43 crc kubenswrapper[4861]: I0129 07:39:43.116328 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:39:43 crc kubenswrapper[4861]: E0129 07:39:43.117059 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:39:57 crc kubenswrapper[4861]: I0129 07:39:57.116459 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051" Jan 29 07:39:57 crc kubenswrapper[4861]: E0129 07:39:57.117113 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:40:10 crc kubenswrapper[4861]: I0129 07:40:10.116824 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051"
Jan 29 07:40:10 crc kubenswrapper[4861]: E0129 07:40:10.117571 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:40:23 crc kubenswrapper[4861]: I0129 07:40:23.117101 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051"
Jan 29 07:40:23 crc kubenswrapper[4861]: E0129 07:40:23.117826 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:40:35 crc kubenswrapper[4861]: I0129 07:40:35.116958 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051"
Jan 29 07:40:36 crc kubenswrapper[4861]: I0129 07:40:36.148507 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"34c93b2ac70f8d71d7a7b62ff354f3534e02592875614add98655adb2b6367f7"}
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.182700 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ssn6w"]
Jan 29 07:40:49 crc kubenswrapper[4861]: E0129 07:40:49.184901 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12d5e7cd-4284-4809-838f-4db140a06cf3" containerName="extract-utilities"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.184942 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="12d5e7cd-4284-4809-838f-4db140a06cf3" containerName="extract-utilities"
Jan 29 07:40:49 crc kubenswrapper[4861]: E0129 07:40:49.184979 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12d5e7cd-4284-4809-838f-4db140a06cf3" containerName="extract-content"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.185002 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="12d5e7cd-4284-4809-838f-4db140a06cf3" containerName="extract-content"
Jan 29 07:40:49 crc kubenswrapper[4861]: E0129 07:40:49.185033 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12d5e7cd-4284-4809-838f-4db140a06cf3" containerName="registry-server"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.185050 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="12d5e7cd-4284-4809-838f-4db140a06cf3" containerName="registry-server"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.185484 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="12d5e7cd-4284-4809-838f-4db140a06cf3" containerName="registry-server"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.188361 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.193466 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ssn6w"]
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.334501 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-catalog-content\") pod \"community-operators-ssn6w\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") " pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.335303 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gn474\" (UniqueName: \"kubernetes.io/projected/24224884-a547-49a8-a8d0-9933c9f78389-kube-api-access-gn474\") pod \"community-operators-ssn6w\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") " pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.335340 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-utilities\") pod \"community-operators-ssn6w\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") " pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.436833 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gn474\" (UniqueName: \"kubernetes.io/projected/24224884-a547-49a8-a8d0-9933c9f78389-kube-api-access-gn474\") pod \"community-operators-ssn6w\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") " pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.436962 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-utilities\") pod \"community-operators-ssn6w\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") " pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.437017 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-catalog-content\") pod \"community-operators-ssn6w\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") " pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.437776 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-catalog-content\") pod \"community-operators-ssn6w\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") " pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.437929 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-utilities\") pod \"community-operators-ssn6w\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") " pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.463984 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gn474\" (UniqueName: \"kubernetes.io/projected/24224884-a547-49a8-a8d0-9933c9f78389-kube-api-access-gn474\") pod \"community-operators-ssn6w\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") " pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:49 crc kubenswrapper[4861]: I0129 07:40:49.555268 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:50 crc kubenswrapper[4861]: I0129 07:40:50.094860 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ssn6w"]
Jan 29 07:40:50 crc kubenswrapper[4861]: I0129 07:40:50.276203 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ssn6w" event={"ID":"24224884-a547-49a8-a8d0-9933c9f78389","Type":"ContainerStarted","Data":"9136530db85ebf4a8799325c77a816710f5785caabdbf517c975018bb49203ba"}
Jan 29 07:40:51 crc kubenswrapper[4861]: I0129 07:40:51.291649 4861 generic.go:334] "Generic (PLEG): container finished" podID="24224884-a547-49a8-a8d0-9933c9f78389" containerID="97a17ea0f104772a96c9e65b9864a87b770853b073b9ce7b16574879bb6e4e07" exitCode=0
Jan 29 07:40:51 crc kubenswrapper[4861]: I0129 07:40:51.292198 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ssn6w" event={"ID":"24224884-a547-49a8-a8d0-9933c9f78389","Type":"ContainerDied","Data":"97a17ea0f104772a96c9e65b9864a87b770853b073b9ce7b16574879bb6e4e07"}
Jan 29 07:40:51 crc kubenswrapper[4861]: I0129 07:40:51.295653 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 07:40:52 crc kubenswrapper[4861]: I0129 07:40:52.301282 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ssn6w" event={"ID":"24224884-a547-49a8-a8d0-9933c9f78389","Type":"ContainerStarted","Data":"345d940621e623cbafccae35ef7c686c325129601a6acfbcb76aa1ecc51b149e"}
Jan 29 07:40:53 crc kubenswrapper[4861]: I0129 07:40:53.315751 4861 generic.go:334] "Generic (PLEG): container finished" podID="24224884-a547-49a8-a8d0-9933c9f78389" containerID="345d940621e623cbafccae35ef7c686c325129601a6acfbcb76aa1ecc51b149e" exitCode=0
Jan 29 07:40:53 crc kubenswrapper[4861]: I0129 07:40:53.315803 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ssn6w" event={"ID":"24224884-a547-49a8-a8d0-9933c9f78389","Type":"ContainerDied","Data":"345d940621e623cbafccae35ef7c686c325129601a6acfbcb76aa1ecc51b149e"}
Jan 29 07:40:54 crc kubenswrapper[4861]: I0129 07:40:54.326829 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ssn6w" event={"ID":"24224884-a547-49a8-a8d0-9933c9f78389","Type":"ContainerStarted","Data":"4e4b76439a948ab9f7fc0fd2a0d60c1394a44c70e242bf4b89fc5016bf6520d6"}
Jan 29 07:40:54 crc kubenswrapper[4861]: I0129 07:40:54.354512 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ssn6w" podStartSLOduration=2.897727236 podStartE2EDuration="5.354495835s" podCreationTimestamp="2026-01-29 07:40:49 +0000 UTC" firstStartedPulling="2026-01-29 07:40:51.295362946 +0000 UTC m=+3942.966857513" lastFinishedPulling="2026-01-29 07:40:53.752131515 +0000 UTC m=+3945.423626112" observedRunningTime="2026-01-29 07:40:54.351710551 +0000 UTC m=+3946.023205108" watchObservedRunningTime="2026-01-29 07:40:54.354495835 +0000 UTC m=+3946.025990392"
Jan 29 07:40:59 crc kubenswrapper[4861]: I0129 07:40:59.555530 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:59 crc kubenswrapper[4861]: I0129 07:40:59.556007 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:40:59 crc kubenswrapper[4861]: I0129 07:40:59.603052 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:41:00 crc kubenswrapper[4861]: I0129 07:41:00.433349 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:41:01 crc kubenswrapper[4861]: I0129 07:41:01.556965 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ssn6w"]
Jan 29 07:41:02 crc kubenswrapper[4861]: I0129 07:41:02.393751 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ssn6w" podUID="24224884-a547-49a8-a8d0-9933c9f78389" containerName="registry-server" containerID="cri-o://4e4b76439a948ab9f7fc0fd2a0d60c1394a44c70e242bf4b89fc5016bf6520d6" gracePeriod=2
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.402904 4861 generic.go:334] "Generic (PLEG): container finished" podID="24224884-a547-49a8-a8d0-9933c9f78389" containerID="4e4b76439a948ab9f7fc0fd2a0d60c1394a44c70e242bf4b89fc5016bf6520d6" exitCode=0
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.402984 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ssn6w" event={"ID":"24224884-a547-49a8-a8d0-9933c9f78389","Type":"ContainerDied","Data":"4e4b76439a948ab9f7fc0fd2a0d60c1394a44c70e242bf4b89fc5016bf6520d6"}
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.497818 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.643662 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-catalog-content\") pod \"24224884-a547-49a8-a8d0-9933c9f78389\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") "
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.647330 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-utilities\") pod \"24224884-a547-49a8-a8d0-9933c9f78389\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") "
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.648178 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-utilities" (OuterVolumeSpecName: "utilities") pod "24224884-a547-49a8-a8d0-9933c9f78389" (UID: "24224884-a547-49a8-a8d0-9933c9f78389"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.648284 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gn474\" (UniqueName: \"kubernetes.io/projected/24224884-a547-49a8-a8d0-9933c9f78389-kube-api-access-gn474\") pod \"24224884-a547-49a8-a8d0-9933c9f78389\" (UID: \"24224884-a547-49a8-a8d0-9933c9f78389\") "
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.649818 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.660556 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24224884-a547-49a8-a8d0-9933c9f78389-kube-api-access-gn474" (OuterVolumeSpecName: "kube-api-access-gn474") pod "24224884-a547-49a8-a8d0-9933c9f78389" (UID: "24224884-a547-49a8-a8d0-9933c9f78389"). InnerVolumeSpecName "kube-api-access-gn474". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.702104 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24224884-a547-49a8-a8d0-9933c9f78389" (UID: "24224884-a547-49a8-a8d0-9933c9f78389"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.751540 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gn474\" (UniqueName: \"kubernetes.io/projected/24224884-a547-49a8-a8d0-9933c9f78389-kube-api-access-gn474\") on node \"crc\" DevicePath \"\""
Jan 29 07:41:03 crc kubenswrapper[4861]: I0129 07:41:03.751578 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24224884-a547-49a8-a8d0-9933c9f78389-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 07:41:04 crc kubenswrapper[4861]: I0129 07:41:04.411843 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ssn6w" event={"ID":"24224884-a547-49a8-a8d0-9933c9f78389","Type":"ContainerDied","Data":"9136530db85ebf4a8799325c77a816710f5785caabdbf517c975018bb49203ba"}
Jan 29 07:41:04 crc kubenswrapper[4861]: I0129 07:41:04.412264 4861 scope.go:117] "RemoveContainer" containerID="4e4b76439a948ab9f7fc0fd2a0d60c1394a44c70e242bf4b89fc5016bf6520d6"
Jan 29 07:41:04 crc kubenswrapper[4861]: I0129 07:41:04.411911 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ssn6w"
Jan 29 07:41:04 crc kubenswrapper[4861]: I0129 07:41:04.438044 4861 scope.go:117] "RemoveContainer" containerID="345d940621e623cbafccae35ef7c686c325129601a6acfbcb76aa1ecc51b149e"
Jan 29 07:41:04 crc kubenswrapper[4861]: I0129 07:41:04.445928 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ssn6w"]
Jan 29 07:41:04 crc kubenswrapper[4861]: I0129 07:41:04.454773 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ssn6w"]
Jan 29 07:41:04 crc kubenswrapper[4861]: I0129 07:41:04.464020 4861 scope.go:117] "RemoveContainer" containerID="97a17ea0f104772a96c9e65b9864a87b770853b073b9ce7b16574879bb6e4e07"
Jan 29 07:41:05 crc kubenswrapper[4861]: I0129 07:41:05.127186 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24224884-a547-49a8-a8d0-9933c9f78389" path="/var/lib/kubelet/pods/24224884-a547-49a8-a8d0-9933c9f78389/volumes"
Jan 29 07:43:00 crc kubenswrapper[4861]: I0129 07:43:00.630015 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:43:00 crc kubenswrapper[4861]: I0129 07:43:00.630626 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:43:30 crc kubenswrapper[4861]: I0129 07:43:30.629567 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:43:30 crc kubenswrapper[4861]: I0129 07:43:30.630530 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:44:00 crc kubenswrapper[4861]: I0129 07:44:00.629456 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:44:00 crc kubenswrapper[4861]: I0129 07:44:00.630261 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:44:00 crc kubenswrapper[4861]: I0129 07:44:00.630331 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 07:44:00 crc kubenswrapper[4861]: I0129 07:44:00.631356 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"34c93b2ac70f8d71d7a7b62ff354f3534e02592875614add98655adb2b6367f7"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 07:44:00 crc kubenswrapper[4861]: I0129 07:44:00.631462 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://34c93b2ac70f8d71d7a7b62ff354f3534e02592875614add98655adb2b6367f7" gracePeriod=600
Jan 29 07:44:00 crc kubenswrapper[4861]: I0129 07:44:00.895168 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="34c93b2ac70f8d71d7a7b62ff354f3534e02592875614add98655adb2b6367f7" exitCode=0
Jan 29 07:44:00 crc kubenswrapper[4861]: I0129 07:44:00.895233 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"34c93b2ac70f8d71d7a7b62ff354f3534e02592875614add98655adb2b6367f7"}
Jan 29 07:44:00 crc kubenswrapper[4861]: I0129 07:44:00.895323 4861 scope.go:117] "RemoveContainer" containerID="dbb3b9a0a728a524218346ac0b62d57283bc3029bf5c78b8e92a665ff21db051"
Jan 29 07:44:01 crc kubenswrapper[4861]: I0129 07:44:01.907458 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"}
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.338413 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7ch84"]
Jan 29 07:44:46 crc kubenswrapper[4861]: E0129 07:44:46.339253 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24224884-a547-49a8-a8d0-9933c9f78389" containerName="extract-utilities"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.339267 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="24224884-a547-49a8-a8d0-9933c9f78389" containerName="extract-utilities"
Jan 29 07:44:46 crc kubenswrapper[4861]: E0129 07:44:46.339282 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24224884-a547-49a8-a8d0-9933c9f78389" containerName="registry-server"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.339290 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="24224884-a547-49a8-a8d0-9933c9f78389" containerName="registry-server"
Jan 29 07:44:46 crc kubenswrapper[4861]: E0129 07:44:46.339308 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24224884-a547-49a8-a8d0-9933c9f78389" containerName="extract-content"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.339316 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="24224884-a547-49a8-a8d0-9933c9f78389" containerName="extract-content"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.339436 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="24224884-a547-49a8-a8d0-9933c9f78389" containerName="registry-server"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.340441 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.352320 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7ch84"]
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.437120 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6c4b\" (UniqueName: \"kubernetes.io/projected/c23d2778-72c9-4d13-8295-17fdf3bb4015-kube-api-access-r6c4b\") pod \"redhat-operators-7ch84\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") " pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.437426 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-utilities\") pod \"redhat-operators-7ch84\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") " pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.437451 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-catalog-content\") pod \"redhat-operators-7ch84\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") " pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.539124 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6c4b\" (UniqueName: \"kubernetes.io/projected/c23d2778-72c9-4d13-8295-17fdf3bb4015-kube-api-access-r6c4b\") pod \"redhat-operators-7ch84\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") " pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.539177 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-utilities\") pod \"redhat-operators-7ch84\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") " pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.539207 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-catalog-content\") pod \"redhat-operators-7ch84\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") " pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.539695 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-utilities\") pod \"redhat-operators-7ch84\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") " pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.539755 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-catalog-content\") pod \"redhat-operators-7ch84\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") " pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.561498 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6c4b\" (UniqueName: \"kubernetes.io/projected/c23d2778-72c9-4d13-8295-17fdf3bb4015-kube-api-access-r6c4b\") pod \"redhat-operators-7ch84\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") " pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.672490 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:46 crc kubenswrapper[4861]: I0129 07:44:46.891846 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7ch84"]
Jan 29 07:44:47 crc kubenswrapper[4861]: I0129 07:44:47.280897 4861 generic.go:334] "Generic (PLEG): container finished" podID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerID="a306efd481509d6e13860c709bace710be4ceb9ca049a9167f7d91164ffc39fb" exitCode=0
Jan 29 07:44:47 crc kubenswrapper[4861]: I0129 07:44:47.280956 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ch84" event={"ID":"c23d2778-72c9-4d13-8295-17fdf3bb4015","Type":"ContainerDied","Data":"a306efd481509d6e13860c709bace710be4ceb9ca049a9167f7d91164ffc39fb"}
Jan 29 07:44:47 crc kubenswrapper[4861]: I0129 07:44:47.281008 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ch84" event={"ID":"c23d2778-72c9-4d13-8295-17fdf3bb4015","Type":"ContainerStarted","Data":"92062874fb7319b756c7079df74954bc2825d01d9a6ba6e00e593724d712ceaa"}
Jan 29 07:44:48 crc kubenswrapper[4861]: I0129 07:44:48.288846 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ch84" event={"ID":"c23d2778-72c9-4d13-8295-17fdf3bb4015","Type":"ContainerStarted","Data":"d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770"}
Jan 29 07:44:49 crc kubenswrapper[4861]: I0129 07:44:49.301287 4861 generic.go:334] "Generic (PLEG): container finished" podID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerID="d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770" exitCode=0
Jan 29 07:44:49 crc kubenswrapper[4861]: I0129 07:44:49.301355 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ch84" event={"ID":"c23d2778-72c9-4d13-8295-17fdf3bb4015","Type":"ContainerDied","Data":"d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770"}
Jan 29 07:44:50 crc kubenswrapper[4861]: I0129 07:44:50.312528 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ch84" event={"ID":"c23d2778-72c9-4d13-8295-17fdf3bb4015","Type":"ContainerStarted","Data":"56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc"}
Jan 29 07:44:50 crc kubenswrapper[4861]: I0129 07:44:50.342369 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7ch84" podStartSLOduration=1.766824232 podStartE2EDuration="4.342338711s" podCreationTimestamp="2026-01-29 07:44:46 +0000 UTC" firstStartedPulling="2026-01-29 07:44:47.283725438 +0000 UTC m=+4178.955219995" lastFinishedPulling="2026-01-29 07:44:49.859239897 +0000 UTC m=+4181.530734474" observedRunningTime="2026-01-29 07:44:50.342066234 +0000 UTC m=+4182.013560861" watchObservedRunningTime="2026-01-29 07:44:50.342338711 +0000 UTC m=+4182.013833308"
Jan 29 07:44:56 crc kubenswrapper[4861]: I0129 07:44:56.673594 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:56 crc kubenswrapper[4861]: I0129 07:44:56.674763 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:44:57 crc kubenswrapper[4861]: I0129 07:44:57.741610 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7ch84" podUID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerName="registry-server" probeResult="failure" output=<
Jan 29 07:44:57 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s
Jan 29 07:44:57 crc kubenswrapper[4861]: >
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.186595 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"]
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.188414 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.191356 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.191402 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.200880 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"]
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.376644 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dh24\" (UniqueName: \"kubernetes.io/projected/01b12670-e056-49b0-a9a3-8680110c1c1c-kube-api-access-4dh24\") pod \"collect-profiles-29494545-m4n6l\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.376714 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01b12670-e056-49b0-a9a3-8680110c1c1c-secret-volume\") pod \"collect-profiles-29494545-m4n6l\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.377239 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01b12670-e056-49b0-a9a3-8680110c1c1c-config-volume\") pod \"collect-profiles-29494545-m4n6l\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.478481 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01b12670-e056-49b0-a9a3-8680110c1c1c-config-volume\") pod \"collect-profiles-29494545-m4n6l\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.478667 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dh24\" (UniqueName: \"kubernetes.io/projected/01b12670-e056-49b0-a9a3-8680110c1c1c-kube-api-access-4dh24\") pod \"collect-profiles-29494545-m4n6l\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.479113 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01b12670-e056-49b0-a9a3-8680110c1c1c-secret-volume\") pod \"collect-profiles-29494545-m4n6l\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.479570 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01b12670-e056-49b0-a9a3-8680110c1c1c-config-volume\") pod \"collect-profiles-29494545-m4n6l\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.485460 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01b12670-e056-49b0-a9a3-8680110c1c1c-secret-volume\") pod \"collect-profiles-29494545-m4n6l\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.493742 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dh24\" (UniqueName: \"kubernetes.io/projected/01b12670-e056-49b0-a9a3-8680110c1c1c-kube-api-access-4dh24\") pod \"collect-profiles-29494545-m4n6l\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.517587 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:00 crc kubenswrapper[4861]: I0129 07:45:00.941087 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"]
Jan 29 07:45:01 crc kubenswrapper[4861]: I0129 07:45:01.390996 4861 generic.go:334] "Generic (PLEG): container finished" podID="01b12670-e056-49b0-a9a3-8680110c1c1c" containerID="ed85ed353b8a57df9e96bc2bd576de5481b8db5ba9c82ff0189c462b69b225cf" exitCode=0
Jan 29 07:45:01 crc kubenswrapper[4861]: I0129 07:45:01.391101 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l" event={"ID":"01b12670-e056-49b0-a9a3-8680110c1c1c","Type":"ContainerDied","Data":"ed85ed353b8a57df9e96bc2bd576de5481b8db5ba9c82ff0189c462b69b225cf"}
Jan 29 07:45:01 crc kubenswrapper[4861]: I0129 07:45:01.391314 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l" event={"ID":"01b12670-e056-49b0-a9a3-8680110c1c1c","Type":"ContainerStarted","Data":"3632314ac2d6d3ad5e189b748ec01fbeceb07b2f5d3fafe685b11a25d48a67a7"}
Jan 29 07:45:02 crc kubenswrapper[4861]: I0129 07:45:02.798213 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:02 crc kubenswrapper[4861]: I0129 07:45:02.914978 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dh24\" (UniqueName: \"kubernetes.io/projected/01b12670-e056-49b0-a9a3-8680110c1c1c-kube-api-access-4dh24\") pod \"01b12670-e056-49b0-a9a3-8680110c1c1c\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") "
Jan 29 07:45:02 crc kubenswrapper[4861]: I0129 07:45:02.915259 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01b12670-e056-49b0-a9a3-8680110c1c1c-secret-volume\") pod \"01b12670-e056-49b0-a9a3-8680110c1c1c\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") "
Jan 29 07:45:02 crc kubenswrapper[4861]: I0129 07:45:02.916484 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01b12670-e056-49b0-a9a3-8680110c1c1c-config-volume\") pod \"01b12670-e056-49b0-a9a3-8680110c1c1c\" (UID: \"01b12670-e056-49b0-a9a3-8680110c1c1c\") "
Jan 29 07:45:02 crc kubenswrapper[4861]: I0129 07:45:02.917086 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01b12670-e056-49b0-a9a3-8680110c1c1c-config-volume" (OuterVolumeSpecName: "config-volume") pod "01b12670-e056-49b0-a9a3-8680110c1c1c" (UID: "01b12670-e056-49b0-a9a3-8680110c1c1c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:45:02 crc kubenswrapper[4861]: I0129 07:45:02.921989 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01b12670-e056-49b0-a9a3-8680110c1c1c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "01b12670-e056-49b0-a9a3-8680110c1c1c" (UID: "01b12670-e056-49b0-a9a3-8680110c1c1c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 07:45:02 crc kubenswrapper[4861]: I0129 07:45:02.922104 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01b12670-e056-49b0-a9a3-8680110c1c1c-kube-api-access-4dh24" (OuterVolumeSpecName: "kube-api-access-4dh24") pod "01b12670-e056-49b0-a9a3-8680110c1c1c" (UID: "01b12670-e056-49b0-a9a3-8680110c1c1c"). InnerVolumeSpecName "kube-api-access-4dh24". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:45:03 crc kubenswrapper[4861]: I0129 07:45:03.019206 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01b12670-e056-49b0-a9a3-8680110c1c1c-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 29 07:45:03 crc kubenswrapper[4861]: I0129 07:45:03.019277 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01b12670-e056-49b0-a9a3-8680110c1c1c-config-volume\") on node \"crc\" DevicePath \"\""
Jan 29 07:45:03 crc kubenswrapper[4861]: I0129 07:45:03.019294 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dh24\" (UniqueName: \"kubernetes.io/projected/01b12670-e056-49b0-a9a3-8680110c1c1c-kube-api-access-4dh24\") on node \"crc\" DevicePath \"\""
Jan 29 07:45:03 crc kubenswrapper[4861]: I0129 07:45:03.408753 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l" event={"ID":"01b12670-e056-49b0-a9a3-8680110c1c1c","Type":"ContainerDied","Data":"3632314ac2d6d3ad5e189b748ec01fbeceb07b2f5d3fafe685b11a25d48a67a7"}
Jan 29 07:45:03 crc kubenswrapper[4861]: I0129 07:45:03.408830 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3632314ac2d6d3ad5e189b748ec01fbeceb07b2f5d3fafe685b11a25d48a67a7"
Jan 29 07:45:03 crc kubenswrapper[4861]: I0129 07:45:03.408885 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"
Jan 29 07:45:03 crc kubenswrapper[4861]: I0129 07:45:03.883017 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9"]
Jan 29 07:45:03 crc kubenswrapper[4861]: I0129 07:45:03.895910 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494500-qjjd9"]
Jan 29 07:45:05 crc kubenswrapper[4861]: I0129 07:45:05.128270 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b862adb6-7c39-4a96-bddb-b35306031ebc" path="/var/lib/kubelet/pods/b862adb6-7c39-4a96-bddb-b35306031ebc/volumes"
Jan 29 07:45:06 crc kubenswrapper[4861]: I0129 07:45:06.716639 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:45:06 crc kubenswrapper[4861]: I0129 07:45:06.775906 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:45:06 crc kubenswrapper[4861]: I0129 07:45:06.955404 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7ch84"]
Jan 29 07:45:08 crc kubenswrapper[4861]: I0129 07:45:08.449692 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7ch84" podUID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerName="registry-server" containerID="cri-o://56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc" gracePeriod=2
Jan 29 07:45:08 crc kubenswrapper[4861]: I0129 07:45:08.903147 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.024462 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-utilities\") pod \"c23d2778-72c9-4d13-8295-17fdf3bb4015\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") "
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.024554 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-catalog-content\") pod \"c23d2778-72c9-4d13-8295-17fdf3bb4015\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") "
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.024579 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6c4b\" (UniqueName: \"kubernetes.io/projected/c23d2778-72c9-4d13-8295-17fdf3bb4015-kube-api-access-r6c4b\") pod \"c23d2778-72c9-4d13-8295-17fdf3bb4015\" (UID: \"c23d2778-72c9-4d13-8295-17fdf3bb4015\") "
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.025885 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-utilities" (OuterVolumeSpecName: "utilities") pod "c23d2778-72c9-4d13-8295-17fdf3bb4015" (UID: "c23d2778-72c9-4d13-8295-17fdf3bb4015"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.035337 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c23d2778-72c9-4d13-8295-17fdf3bb4015-kube-api-access-r6c4b" (OuterVolumeSpecName: "kube-api-access-r6c4b") pod "c23d2778-72c9-4d13-8295-17fdf3bb4015" (UID: "c23d2778-72c9-4d13-8295-17fdf3bb4015"). InnerVolumeSpecName "kube-api-access-r6c4b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.125655 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.126145 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6c4b\" (UniqueName: \"kubernetes.io/projected/c23d2778-72c9-4d13-8295-17fdf3bb4015-kube-api-access-r6c4b\") on node \"crc\" DevicePath \"\""
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.158346 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c23d2778-72c9-4d13-8295-17fdf3bb4015" (UID: "c23d2778-72c9-4d13-8295-17fdf3bb4015"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.227930 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c23d2778-72c9-4d13-8295-17fdf3bb4015-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.462063 4861 generic.go:334] "Generic (PLEG): container finished" podID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerID="56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc" exitCode=0
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.462149 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7ch84"
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.462167 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ch84" event={"ID":"c23d2778-72c9-4d13-8295-17fdf3bb4015","Type":"ContainerDied","Data":"56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc"}
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.462220 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ch84" event={"ID":"c23d2778-72c9-4d13-8295-17fdf3bb4015","Type":"ContainerDied","Data":"92062874fb7319b756c7079df74954bc2825d01d9a6ba6e00e593724d712ceaa"}
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.462256 4861 scope.go:117] "RemoveContainer" containerID="56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc"
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.485046 4861 scope.go:117] "RemoveContainer" containerID="d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770"
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.497011 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7ch84"]
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.501617 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7ch84"]
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.513708 4861 scope.go:117] "RemoveContainer" containerID="a306efd481509d6e13860c709bace710be4ceb9ca049a9167f7d91164ffc39fb"
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.535901 4861 scope.go:117] "RemoveContainer" containerID="56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc"
Jan 29 07:45:09 crc kubenswrapper[4861]: E0129 07:45:09.536717 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc\": container with ID starting with 56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc not found: ID does not exist" containerID="56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc"
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.536758 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc"} err="failed to get container status \"56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc\": rpc error: code = NotFound desc = could not find container \"56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc\": container with ID starting with 56b119b0733036db3b5ef112a83da865cc6fad60069efd4dd030da253c4860dc not found: ID does not exist"
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.536791 4861 scope.go:117] "RemoveContainer" containerID="d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770"
Jan 29 07:45:09 crc kubenswrapper[4861]: E0129 07:45:09.538198 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770\": container with ID starting with d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770 not found: ID does not exist" containerID="d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770"
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.538459 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770"} err="failed to get container status \"d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770\": rpc error: code = NotFound desc = could not find container \"d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770\": container with ID starting with d41eafabe54175033463cbf5ec9ad919299c17be8463f096829e6654a3fd9770 not found: ID does not exist"
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.538697 4861 scope.go:117] "RemoveContainer" containerID="a306efd481509d6e13860c709bace710be4ceb9ca049a9167f7d91164ffc39fb"
Jan 29 07:45:09 crc kubenswrapper[4861]: E0129 07:45:09.539682 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a306efd481509d6e13860c709bace710be4ceb9ca049a9167f7d91164ffc39fb\": container with ID starting with a306efd481509d6e13860c709bace710be4ceb9ca049a9167f7d91164ffc39fb not found: ID does not exist" containerID="a306efd481509d6e13860c709bace710be4ceb9ca049a9167f7d91164ffc39fb"
Jan 29 07:45:09 crc kubenswrapper[4861]: I0129 07:45:09.539712 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a306efd481509d6e13860c709bace710be4ceb9ca049a9167f7d91164ffc39fb"} err="failed to get container status \"a306efd481509d6e13860c709bace710be4ceb9ca049a9167f7d91164ffc39fb\": rpc error: code = NotFound desc = could not find container \"a306efd481509d6e13860c709bace710be4ceb9ca049a9167f7d91164ffc39fb\": container with ID starting with a306efd481509d6e13860c709bace710be4ceb9ca049a9167f7d91164ffc39fb not found: ID does not exist"
Jan 29 07:45:11 crc kubenswrapper[4861]: I0129 07:45:11.125855 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c23d2778-72c9-4d13-8295-17fdf3bb4015" path="/var/lib/kubelet/pods/c23d2778-72c9-4d13-8295-17fdf3bb4015/volumes"
Jan 29 07:45:24 crc kubenswrapper[4861]: I0129 07:45:24.796845 4861 scope.go:117] "RemoveContainer" containerID="8bb2554be01b8fb2aca36b2ce880eea85e470f8fef763c5b4bda80f3548020a2"
Jan 29 07:46:00 crc kubenswrapper[4861]: I0129 07:46:00.630000 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:46:00 crc kubenswrapper[4861]: I0129 07:46:00.630864 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:46:30 crc kubenswrapper[4861]: I0129 07:46:30.629718 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:46:30 crc kubenswrapper[4861]: I0129 07:46:30.630658 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:47:00 crc kubenswrapper[4861]: I0129 07:47:00.630137 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:47:00 crc kubenswrapper[4861]: I0129 07:47:00.630783 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:47:00 crc kubenswrapper[4861]: I0129 07:47:00.630859 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 07:47:00 crc kubenswrapper[4861]: I0129 07:47:00.631541 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 07:47:00 crc kubenswrapper[4861]: I0129 07:47:00.631609 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963" gracePeriod=600
Jan 29 07:47:00 crc kubenswrapper[4861]: E0129 07:47:00.776336 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:47:01 crc kubenswrapper[4861]: I0129 07:47:01.474697 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963" exitCode=0
Jan 29 07:47:01 crc kubenswrapper[4861]: I0129 07:47:01.474785 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"}
Jan 29 07:47:01 crc kubenswrapper[4861]: I0129 07:47:01.475487 4861 scope.go:117] "RemoveContainer" containerID="34c93b2ac70f8d71d7a7b62ff354f3534e02592875614add98655adb2b6367f7"
Jan 29 07:47:01 crc kubenswrapper[4861]: I0129 07:47:01.476720 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:47:01 crc kubenswrapper[4861]: E0129 07:47:01.477920 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:47:13 crc kubenswrapper[4861]: I0129 07:47:13.116118 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:47:13 crc kubenswrapper[4861]: E0129 07:47:13.116831 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:47:28 crc kubenswrapper[4861]: I0129 07:47:28.117224 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:47:28 crc kubenswrapper[4861]: E0129 07:47:28.118433 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:47:42 crc kubenswrapper[4861]: I0129 07:47:42.117096 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:47:42 crc kubenswrapper[4861]: E0129 07:47:42.118031 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:47:56 crc kubenswrapper[4861]: I0129 07:47:56.116591 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:47:56 crc kubenswrapper[4861]: E0129 07:47:56.117374 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:48:08 crc kubenswrapper[4861]: I0129 07:48:08.116785 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:48:08 crc kubenswrapper[4861]: E0129 07:48:08.117895 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:48:22 crc kubenswrapper[4861]: I0129 07:48:22.116832 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:48:22 crc kubenswrapper[4861]: E0129 07:48:22.117691 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:48:33 crc kubenswrapper[4861]: I0129 07:48:33.117618 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:48:33 crc kubenswrapper[4861]: E0129 07:48:33.118788 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:48:44 crc kubenswrapper[4861]: I0129 07:48:44.117063 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:48:44 crc kubenswrapper[4861]: E0129 07:48:44.117640 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:48:56 crc kubenswrapper[4861]: I0129 07:48:56.123012 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:48:56 crc kubenswrapper[4861]: E0129 07:48:56.124018 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:49:09 crc kubenswrapper[4861]: I0129 07:49:09.121419 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:49:09 crc kubenswrapper[4861]: E0129 07:49:09.122209 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:49:22 crc kubenswrapper[4861]: I0129 07:49:22.116740 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:49:22 crc kubenswrapper[4861]: E0129 07:49:22.117587 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:49:35 crc kubenswrapper[4861]: I0129 07:49:35.117241 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:49:35 crc kubenswrapper[4861]: E0129 07:49:35.118365 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:49:47 crc kubenswrapper[4861]: I0129 07:49:47.116373 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:49:47 crc kubenswrapper[4861]: E0129 07:49:47.117428 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:49:59 crc kubenswrapper[4861]: I0129 07:49:59.121642 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:49:59 crc kubenswrapper[4861]: E0129 07:49:59.123591 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:50:11 crc kubenswrapper[4861]: I0129 07:50:11.116471 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:50:11 crc kubenswrapper[4861]: E0129 07:50:11.117323 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:50:25 crc kubenswrapper[4861]: I0129 07:50:25.118014 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:50:25 crc kubenswrapper[4861]: E0129 07:50:25.119343 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:50:40 crc kubenswrapper[4861]: I0129 07:50:40.116714 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:50:40 crc kubenswrapper[4861]: E0129 07:50:40.117853 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:50:53 crc kubenswrapper[4861]: I0129 07:50:53.116666 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:50:53 crc kubenswrapper[4861]: E0129 07:50:53.117428 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:51:06 crc kubenswrapper[4861]: I0129 07:51:06.116513 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:51:06 crc kubenswrapper[4861]: E0129 07:51:06.117692 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 07:51:18 crc kubenswrapper[4861]: I0129 07:51:18.129946 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963"
Jan 29 07:51:18 crc kubenswrapper[4861]: E0129 07:51:18.130747 4861 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:51:29 crc kubenswrapper[4861]: I0129 07:51:29.124962 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963" Jan 29 07:51:29 crc kubenswrapper[4861]: E0129 07:51:29.126147 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.255215 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8jgsb"] Jan 29 07:51:34 crc kubenswrapper[4861]: E0129 07:51:34.256777 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerName="registry-server" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.256805 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerName="registry-server" Jan 29 07:51:34 crc kubenswrapper[4861]: E0129 07:51:34.256840 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerName="extract-utilities" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.256854 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerName="extract-utilities" Jan 29 07:51:34 crc kubenswrapper[4861]: E0129 07:51:34.256908 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerName="extract-content" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.256925 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerName="extract-content" Jan 29 07:51:34 crc kubenswrapper[4861]: E0129 07:51:34.256958 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01b12670-e056-49b0-a9a3-8680110c1c1c" containerName="collect-profiles" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.256975 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="01b12670-e056-49b0-a9a3-8680110c1c1c" containerName="collect-profiles" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.257269 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="01b12670-e056-49b0-a9a3-8680110c1c1c" containerName="collect-profiles" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.257297 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c23d2778-72c9-4d13-8295-17fdf3bb4015" containerName="registry-server" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.259726 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.275840 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8jgsb"] Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.369031 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-catalog-content\") pod \"certified-operators-8jgsb\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.369105 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-utilities\") pod \"certified-operators-8jgsb\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.369139 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6kmd\" (UniqueName: \"kubernetes.io/projected/8491c41c-285d-46c8-8c8a-a72348589912-kube-api-access-x6kmd\") pod \"certified-operators-8jgsb\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.470370 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-catalog-content\") pod \"certified-operators-8jgsb\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.470419 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-utilities\") pod \"certified-operators-8jgsb\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.470445 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6kmd\" (UniqueName: \"kubernetes.io/projected/8491c41c-285d-46c8-8c8a-a72348589912-kube-api-access-x6kmd\") pod \"certified-operators-8jgsb\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.470924 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-catalog-content\") pod \"certified-operators-8jgsb\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.471142 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-utilities\") pod \"certified-operators-8jgsb\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.490310 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-x6kmd\" (UniqueName: \"kubernetes.io/projected/8491c41c-285d-46c8-8c8a-a72348589912-kube-api-access-x6kmd\") pod \"certified-operators-8jgsb\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:34 crc kubenswrapper[4861]: I0129 07:51:34.601365 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:35 crc kubenswrapper[4861]: I0129 07:51:35.138723 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8jgsb"] Jan 29 07:51:35 crc kubenswrapper[4861]: I0129 07:51:35.898390 4861 generic.go:334] "Generic (PLEG): container finished" podID="8491c41c-285d-46c8-8c8a-a72348589912" containerID="955047465a449df02c90286e4a538304aefd648bdbbab9822ab2cf3747b2d35a" exitCode=0 Jan 29 07:51:35 crc kubenswrapper[4861]: I0129 07:51:35.898591 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jgsb" event={"ID":"8491c41c-285d-46c8-8c8a-a72348589912","Type":"ContainerDied","Data":"955047465a449df02c90286e4a538304aefd648bdbbab9822ab2cf3747b2d35a"} Jan 29 07:51:35 crc kubenswrapper[4861]: I0129 07:51:35.898635 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jgsb" event={"ID":"8491c41c-285d-46c8-8c8a-a72348589912","Type":"ContainerStarted","Data":"f24fce2a04b99b40aec27221bf24a3e24f1483098b63423240ecc0bac52b086f"} Jan 29 07:51:35 crc kubenswrapper[4861]: I0129 07:51:35.901388 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.649178 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-56d58"] Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.651030 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.655168 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-56d58"] Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.707330 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-utilities\") pod \"community-operators-56d58\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.707391 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-catalog-content\") pod \"community-operators-56d58\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.707443 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfmpm\" (UniqueName: \"kubernetes.io/projected/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-kube-api-access-qfmpm\") pod \"community-operators-56d58\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.809000 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-utilities\") pod \"community-operators-56d58\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.809337 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-catalog-content\") pod \"community-operators-56d58\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.809561 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfmpm\" (UniqueName: \"kubernetes.io/projected/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-kube-api-access-qfmpm\") pod \"community-operators-56d58\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.809629 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-utilities\") pod \"community-operators-56d58\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.809744 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-catalog-content\") pod \"community-operators-56d58\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.834934 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qfmpm\" (UniqueName: \"kubernetes.io/projected/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-kube-api-access-qfmpm\") pod \"community-operators-56d58\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.907058 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jgsb" event={"ID":"8491c41c-285d-46c8-8c8a-a72348589912","Type":"ContainerStarted","Data":"e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d"} Jan 29 07:51:36 crc kubenswrapper[4861]: I0129 07:51:36.983602 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.241108 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-74w8l"] Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.244013 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.253916 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-74w8l"] Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.316666 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-utilities\") pod \"redhat-marketplace-74w8l\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.316748 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-catalog-content\") pod \"redhat-marketplace-74w8l\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.316802 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmkv4\" (UniqueName: \"kubernetes.io/projected/5b70f915-7801-4239-98d9-6e4806099e43-kube-api-access-fmkv4\") pod \"redhat-marketplace-74w8l\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.418512 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-utilities\") pod \"redhat-marketplace-74w8l\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.418589 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-catalog-content\") pod \"redhat-marketplace-74w8l\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.418659 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmkv4\" 
(UniqueName: \"kubernetes.io/projected/5b70f915-7801-4239-98d9-6e4806099e43-kube-api-access-fmkv4\") pod \"redhat-marketplace-74w8l\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.419101 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-utilities\") pod \"redhat-marketplace-74w8l\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.419116 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-catalog-content\") pod \"redhat-marketplace-74w8l\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.434882 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmkv4\" (UniqueName: \"kubernetes.io/projected/5b70f915-7801-4239-98d9-6e4806099e43-kube-api-access-fmkv4\") pod \"redhat-marketplace-74w8l\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.474969 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-56d58"] Jan 29 07:51:37 crc kubenswrapper[4861]: W0129 07:51:37.489871 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4f3028a9_32d4_4ac0_ab8c_a2c1122d3959.slice/crio-44339b2fabfe35fbbfba5e889f1f3a28560de3667b2fbc88462cbef85b8f4cee WatchSource:0}: Error finding container 44339b2fabfe35fbbfba5e889f1f3a28560de3667b2fbc88462cbef85b8f4cee: Status 404 returned error can't find the container with id 44339b2fabfe35fbbfba5e889f1f3a28560de3667b2fbc88462cbef85b8f4cee Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.567409 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.919364 4861 generic.go:334] "Generic (PLEG): container finished" podID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" containerID="c677c323f2cf8278b03d35ccae881045800b2778b156187657db96b10619c047" exitCode=0 Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.919469 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56d58" event={"ID":"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959","Type":"ContainerDied","Data":"c677c323f2cf8278b03d35ccae881045800b2778b156187657db96b10619c047"} Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.919517 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56d58" event={"ID":"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959","Type":"ContainerStarted","Data":"44339b2fabfe35fbbfba5e889f1f3a28560de3667b2fbc88462cbef85b8f4cee"} Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.930117 4861 generic.go:334] "Generic (PLEG): container finished" podID="8491c41c-285d-46c8-8c8a-a72348589912" containerID="e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d" exitCode=0 Jan 29 07:51:37 crc kubenswrapper[4861]: I0129 07:51:37.930162 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jgsb" event={"ID":"8491c41c-285d-46c8-8c8a-a72348589912","Type":"ContainerDied","Data":"e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d"} Jan 29 07:51:38 crc kubenswrapper[4861]: W0129 07:51:38.039272 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b70f915_7801_4239_98d9_6e4806099e43.slice/crio-7314b2d0331518fd0ff73eb9996a988d328ab5482b167c84f5b75c443a22955f WatchSource:0}: Error finding container 7314b2d0331518fd0ff73eb9996a988d328ab5482b167c84f5b75c443a22955f: Status 404 returned error can't find the container with id 7314b2d0331518fd0ff73eb9996a988d328ab5482b167c84f5b75c443a22955f Jan 29 07:51:38 crc kubenswrapper[4861]: I0129 07:51:38.044865 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-74w8l"] Jan 29 07:51:38 crc kubenswrapper[4861]: I0129 07:51:38.939174 4861 generic.go:334] "Generic (PLEG): container finished" podID="5b70f915-7801-4239-98d9-6e4806099e43" containerID="0c50abae4726553af5fd0e1e0beb8fc621bfb12913d56a9f2b5ae53a509c2619" exitCode=0 Jan 29 07:51:38 crc kubenswrapper[4861]: I0129 07:51:38.939364 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-74w8l" event={"ID":"5b70f915-7801-4239-98d9-6e4806099e43","Type":"ContainerDied","Data":"0c50abae4726553af5fd0e1e0beb8fc621bfb12913d56a9f2b5ae53a509c2619"} Jan 29 07:51:38 crc kubenswrapper[4861]: I0129 07:51:38.939579 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-74w8l" event={"ID":"5b70f915-7801-4239-98d9-6e4806099e43","Type":"ContainerStarted","Data":"7314b2d0331518fd0ff73eb9996a988d328ab5482b167c84f5b75c443a22955f"} Jan 29 07:51:38 crc kubenswrapper[4861]: I0129 07:51:38.945237 4861 generic.go:334] "Generic (PLEG): container finished" podID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" containerID="2d301cb5572aaeaeaa99a8875b2991a2238a05bab398206c895bb8a0a1d09fcb" exitCode=0 Jan 29 07:51:38 crc kubenswrapper[4861]: I0129 07:51:38.945318 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-56d58" event={"ID":"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959","Type":"ContainerDied","Data":"2d301cb5572aaeaeaa99a8875b2991a2238a05bab398206c895bb8a0a1d09fcb"} Jan 29 07:51:38 crc kubenswrapper[4861]: I0129 07:51:38.948120 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jgsb" event={"ID":"8491c41c-285d-46c8-8c8a-a72348589912","Type":"ContainerStarted","Data":"cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c"} Jan 29 07:51:39 crc kubenswrapper[4861]: I0129 07:51:39.957478 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56d58" event={"ID":"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959","Type":"ContainerStarted","Data":"e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a"} Jan 29 07:51:39 crc kubenswrapper[4861]: I0129 07:51:39.960918 4861 generic.go:334] "Generic (PLEG): container finished" podID="5b70f915-7801-4239-98d9-6e4806099e43" containerID="343b8dc89a01302e8f8d95450558f048ebe00d418f81f400450a2a69ae4bdf69" exitCode=0 Jan 29 07:51:39 crc kubenswrapper[4861]: I0129 07:51:39.961977 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-74w8l" event={"ID":"5b70f915-7801-4239-98d9-6e4806099e43","Type":"ContainerDied","Data":"343b8dc89a01302e8f8d95450558f048ebe00d418f81f400450a2a69ae4bdf69"} Jan 29 07:51:39 crc kubenswrapper[4861]: I0129 07:51:39.989968 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8jgsb" podStartSLOduration=3.57226671 podStartE2EDuration="5.989947251s" podCreationTimestamp="2026-01-29 07:51:34 +0000 UTC" firstStartedPulling="2026-01-29 07:51:35.90079282 +0000 UTC m=+4587.572287407" lastFinishedPulling="2026-01-29 07:51:38.318473381 +0000 UTC m=+4589.989967948" observedRunningTime="2026-01-29 07:51:39.013876687 +0000 UTC m=+4590.685371294" watchObservedRunningTime="2026-01-29 07:51:39.989947251 +0000 UTC m=+4591.661441818" Jan 29 07:51:40 crc kubenswrapper[4861]: I0129 07:51:40.014193 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-56d58" podStartSLOduration=2.518311867 podStartE2EDuration="4.014173447s" podCreationTimestamp="2026-01-29 07:51:36 +0000 UTC" firstStartedPulling="2026-01-29 07:51:37.922363762 +0000 UTC m=+4589.593858319" lastFinishedPulling="2026-01-29 07:51:39.418225352 +0000 UTC m=+4591.089719899" observedRunningTime="2026-01-29 07:51:39.984655432 +0000 UTC m=+4591.656150009" watchObservedRunningTime="2026-01-29 07:51:40.014173447 +0000 UTC m=+4591.685668014" Jan 29 07:51:40 crc kubenswrapper[4861]: I0129 07:51:40.975223 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-74w8l" event={"ID":"5b70f915-7801-4239-98d9-6e4806099e43","Type":"ContainerStarted","Data":"6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77"} Jan 29 07:51:41 crc kubenswrapper[4861]: I0129 07:51:41.001783 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-74w8l" podStartSLOduration=2.557592471 podStartE2EDuration="4.001749864s" podCreationTimestamp="2026-01-29 07:51:37 +0000 UTC" firstStartedPulling="2026-01-29 07:51:38.940664735 +0000 UTC m=+4590.612159292" lastFinishedPulling="2026-01-29 07:51:40.384822108 +0000 UTC m=+4592.056316685" observedRunningTime="2026-01-29 07:51:40.997769119 +0000 UTC 
m=+4592.669263696" watchObservedRunningTime="2026-01-29 07:51:41.001749864 +0000 UTC m=+4592.673244441" Jan 29 07:51:41 crc kubenswrapper[4861]: I0129 07:51:41.116780 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963" Jan 29 07:51:41 crc kubenswrapper[4861]: E0129 07:51:41.117043 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:51:44 crc kubenswrapper[4861]: I0129 07:51:44.602139 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:44 crc kubenswrapper[4861]: I0129 07:51:44.602774 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:44 crc kubenswrapper[4861]: I0129 07:51:44.673521 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:45 crc kubenswrapper[4861]: I0129 07:51:45.070743 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:45 crc kubenswrapper[4861]: I0129 07:51:45.234731 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8jgsb"] Jan 29 07:51:46 crc kubenswrapper[4861]: I0129 07:51:46.984629 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:46 crc kubenswrapper[4861]: I0129 07:51:46.986159 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.024048 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8jgsb" podUID="8491c41c-285d-46c8-8c8a-a72348589912" containerName="registry-server" containerID="cri-o://cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c" gracePeriod=2 Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.065472 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.502706 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.568066 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.568564 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.606700 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-utilities\") pod \"8491c41c-285d-46c8-8c8a-a72348589912\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.606798 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-catalog-content\") pod \"8491c41c-285d-46c8-8c8a-a72348589912\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.606942 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6kmd\" (UniqueName: \"kubernetes.io/projected/8491c41c-285d-46c8-8c8a-a72348589912-kube-api-access-x6kmd\") pod \"8491c41c-285d-46c8-8c8a-a72348589912\" (UID: \"8491c41c-285d-46c8-8c8a-a72348589912\") " Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.609566 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-utilities" (OuterVolumeSpecName: "utilities") pod "8491c41c-285d-46c8-8c8a-a72348589912" (UID: "8491c41c-285d-46c8-8c8a-a72348589912"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.615521 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8491c41c-285d-46c8-8c8a-a72348589912-kube-api-access-x6kmd" (OuterVolumeSpecName: "kube-api-access-x6kmd") pod "8491c41c-285d-46c8-8c8a-a72348589912" (UID: "8491c41c-285d-46c8-8c8a-a72348589912"). InnerVolumeSpecName "kube-api-access-x6kmd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.629910 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.660851 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8491c41c-285d-46c8-8c8a-a72348589912" (UID: "8491c41c-285d-46c8-8c8a-a72348589912"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.709337 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6kmd\" (UniqueName: \"kubernetes.io/projected/8491c41c-285d-46c8-8c8a-a72348589912-kube-api-access-x6kmd\") on node \"crc\" DevicePath \"\"" Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.709382 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:51:47 crc kubenswrapper[4861]: I0129 07:51:47.709398 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8491c41c-285d-46c8-8c8a-a72348589912-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.034844 4861 generic.go:334] "Generic (PLEG): container finished" podID="8491c41c-285d-46c8-8c8a-a72348589912" containerID="cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c" exitCode=0 Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.034950 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jgsb" event={"ID":"8491c41c-285d-46c8-8c8a-a72348589912","Type":"ContainerDied","Data":"cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c"} Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.035003 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8jgsb" event={"ID":"8491c41c-285d-46c8-8c8a-a72348589912","Type":"ContainerDied","Data":"f24fce2a04b99b40aec27221bf24a3e24f1483098b63423240ecc0bac52b086f"} Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.035021 4861 scope.go:117] "RemoveContainer" containerID="cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.035020 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8jgsb" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.057504 4861 scope.go:117] "RemoveContainer" containerID="e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.096742 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8jgsb"] Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.100419 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.106775 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8jgsb"] Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.112454 4861 scope.go:117] "RemoveContainer" containerID="955047465a449df02c90286e4a538304aefd648bdbbab9822ab2cf3747b2d35a" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.112579 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.130296 4861 scope.go:117] "RemoveContainer" containerID="cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c" Jan 29 07:51:48 crc kubenswrapper[4861]: E0129 07:51:48.130771 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c\": container with ID starting with cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c not found: ID does not exist" containerID="cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.130830 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c"} err="failed to get container status \"cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c\": rpc error: code = NotFound desc = could not find container \"cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c\": container with ID starting with cd728ceea80961d9bb00b57e6e1eb96791deea3411c7126bd0a455cf2b776a4c not found: ID does not exist" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.130861 4861 scope.go:117] "RemoveContainer" containerID="e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d" Jan 29 07:51:48 crc kubenswrapper[4861]: E0129 07:51:48.131150 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d\": container with ID starting with e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d not found: ID does not exist" containerID="e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.131189 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d"} err="failed to get container status \"e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d\": rpc error: code = NotFound desc = could not find container \"e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d\": container with ID starting with 
e92dd0b443ff0f1f3573bd1b284144290ef8918e4e2ec97c496cff6c09cada1d not found: ID does not exist" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.131208 4861 scope.go:117] "RemoveContainer" containerID="955047465a449df02c90286e4a538304aefd648bdbbab9822ab2cf3747b2d35a" Jan 29 07:51:48 crc kubenswrapper[4861]: E0129 07:51:48.131535 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"955047465a449df02c90286e4a538304aefd648bdbbab9822ab2cf3747b2d35a\": container with ID starting with 955047465a449df02c90286e4a538304aefd648bdbbab9822ab2cf3747b2d35a not found: ID does not exist" containerID="955047465a449df02c90286e4a538304aefd648bdbbab9822ab2cf3747b2d35a" Jan 29 07:51:48 crc kubenswrapper[4861]: I0129 07:51:48.131590 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"955047465a449df02c90286e4a538304aefd648bdbbab9822ab2cf3747b2d35a"} err="failed to get container status \"955047465a449df02c90286e4a538304aefd648bdbbab9822ab2cf3747b2d35a\": rpc error: code = NotFound desc = could not find container \"955047465a449df02c90286e4a538304aefd648bdbbab9822ab2cf3747b2d35a\": container with ID starting with 955047465a449df02c90286e4a538304aefd648bdbbab9822ab2cf3747b2d35a not found: ID does not exist" Jan 29 07:51:49 crc kubenswrapper[4861]: I0129 07:51:49.131137 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8491c41c-285d-46c8-8c8a-a72348589912" path="/var/lib/kubelet/pods/8491c41c-285d-46c8-8c8a-a72348589912/volumes" Jan 29 07:51:51 crc kubenswrapper[4861]: I0129 07:51:51.631992 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-74w8l"] Jan 29 07:51:51 crc kubenswrapper[4861]: I0129 07:51:51.632447 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-74w8l" podUID="5b70f915-7801-4239-98d9-6e4806099e43" containerName="registry-server" containerID="cri-o://6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77" gracePeriod=2 Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.048615 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.073977 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-catalog-content\") pod \"5b70f915-7801-4239-98d9-6e4806099e43\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.074189 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-utilities\") pod \"5b70f915-7801-4239-98d9-6e4806099e43\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.074237 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmkv4\" (UniqueName: \"kubernetes.io/projected/5b70f915-7801-4239-98d9-6e4806099e43-kube-api-access-fmkv4\") pod \"5b70f915-7801-4239-98d9-6e4806099e43\" (UID: \"5b70f915-7801-4239-98d9-6e4806099e43\") " Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.075203 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-utilities" (OuterVolumeSpecName: "utilities") pod "5b70f915-7801-4239-98d9-6e4806099e43" (UID: "5b70f915-7801-4239-98d9-6e4806099e43"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.075251 4861 generic.go:334] "Generic (PLEG): container finished" podID="5b70f915-7801-4239-98d9-6e4806099e43" containerID="6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77" exitCode=0 Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.075290 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-74w8l" event={"ID":"5b70f915-7801-4239-98d9-6e4806099e43","Type":"ContainerDied","Data":"6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77"} Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.075321 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-74w8l" event={"ID":"5b70f915-7801-4239-98d9-6e4806099e43","Type":"ContainerDied","Data":"7314b2d0331518fd0ff73eb9996a988d328ab5482b167c84f5b75c443a22955f"} Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.075321 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-74w8l" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.075339 4861 scope.go:117] "RemoveContainer" containerID="6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.112791 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5b70f915-7801-4239-98d9-6e4806099e43" (UID: "5b70f915-7801-4239-98d9-6e4806099e43"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.118151 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b70f915-7801-4239-98d9-6e4806099e43-kube-api-access-fmkv4" (OuterVolumeSpecName: "kube-api-access-fmkv4") pod "5b70f915-7801-4239-98d9-6e4806099e43" (UID: "5b70f915-7801-4239-98d9-6e4806099e43"). InnerVolumeSpecName "kube-api-access-fmkv4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.139144 4861 scope.go:117] "RemoveContainer" containerID="343b8dc89a01302e8f8d95450558f048ebe00d418f81f400450a2a69ae4bdf69" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.176458 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.176491 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b70f915-7801-4239-98d9-6e4806099e43-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.176501 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmkv4\" (UniqueName: \"kubernetes.io/projected/5b70f915-7801-4239-98d9-6e4806099e43-kube-api-access-fmkv4\") on node \"crc\" DevicePath \"\"" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.179844 4861 scope.go:117] "RemoveContainer" containerID="0c50abae4726553af5fd0e1e0beb8fc621bfb12913d56a9f2b5ae53a509c2619" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.197804 4861 scope.go:117] "RemoveContainer" containerID="6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77" Jan 29 07:51:52 crc kubenswrapper[4861]: E0129 07:51:52.198376 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77\": container with ID starting with 6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77 not found: ID does not exist" containerID="6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.198430 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77"} err="failed to get container status \"6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77\": rpc error: code = NotFound desc = could not find container \"6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77\": container with ID starting with 6339f482215deaf13edbf1c00e5e9231aab516638c33685e3b2883deba397d77 not found: ID does not exist" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.198463 4861 scope.go:117] "RemoveContainer" containerID="343b8dc89a01302e8f8d95450558f048ebe00d418f81f400450a2a69ae4bdf69" Jan 29 07:51:52 crc kubenswrapper[4861]: E0129 07:51:52.199143 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"343b8dc89a01302e8f8d95450558f048ebe00d418f81f400450a2a69ae4bdf69\": container with ID starting with 343b8dc89a01302e8f8d95450558f048ebe00d418f81f400450a2a69ae4bdf69 not found: ID does not exist" 
containerID="343b8dc89a01302e8f8d95450558f048ebe00d418f81f400450a2a69ae4bdf69" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.199190 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"343b8dc89a01302e8f8d95450558f048ebe00d418f81f400450a2a69ae4bdf69"} err="failed to get container status \"343b8dc89a01302e8f8d95450558f048ebe00d418f81f400450a2a69ae4bdf69\": rpc error: code = NotFound desc = could not find container \"343b8dc89a01302e8f8d95450558f048ebe00d418f81f400450a2a69ae4bdf69\": container with ID starting with 343b8dc89a01302e8f8d95450558f048ebe00d418f81f400450a2a69ae4bdf69 not found: ID does not exist" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.199215 4861 scope.go:117] "RemoveContainer" containerID="0c50abae4726553af5fd0e1e0beb8fc621bfb12913d56a9f2b5ae53a509c2619" Jan 29 07:51:52 crc kubenswrapper[4861]: E0129 07:51:52.199635 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c50abae4726553af5fd0e1e0beb8fc621bfb12913d56a9f2b5ae53a509c2619\": container with ID starting with 0c50abae4726553af5fd0e1e0beb8fc621bfb12913d56a9f2b5ae53a509c2619 not found: ID does not exist" containerID="0c50abae4726553af5fd0e1e0beb8fc621bfb12913d56a9f2b5ae53a509c2619" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.199662 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c50abae4726553af5fd0e1e0beb8fc621bfb12913d56a9f2b5ae53a509c2619"} err="failed to get container status \"0c50abae4726553af5fd0e1e0beb8fc621bfb12913d56a9f2b5ae53a509c2619\": rpc error: code = NotFound desc = could not find container \"0c50abae4726553af5fd0e1e0beb8fc621bfb12913d56a9f2b5ae53a509c2619\": container with ID starting with 0c50abae4726553af5fd0e1e0beb8fc621bfb12913d56a9f2b5ae53a509c2619 not found: ID does not exist" Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.425571 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-74w8l"] Jan 29 07:51:52 crc kubenswrapper[4861]: I0129 07:51:52.432323 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-74w8l"] Jan 29 07:51:53 crc kubenswrapper[4861]: I0129 07:51:53.130673 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b70f915-7801-4239-98d9-6e4806099e43" path="/var/lib/kubelet/pods/5b70f915-7801-4239-98d9-6e4806099e43/volumes" Jan 29 07:51:54 crc kubenswrapper[4861]: I0129 07:51:54.228561 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-56d58"] Jan 29 07:51:54 crc kubenswrapper[4861]: I0129 07:51:54.228775 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-56d58" podUID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" containerName="registry-server" containerID="cri-o://e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a" gracePeriod=2 Jan 29 07:51:54 crc kubenswrapper[4861]: I0129 07:51:54.925613 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.019325 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-catalog-content\") pod \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.019411 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-utilities\") pod \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.019461 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfmpm\" (UniqueName: \"kubernetes.io/projected/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-kube-api-access-qfmpm\") pod \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\" (UID: \"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959\") " Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.020546 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-utilities" (OuterVolumeSpecName: "utilities") pod "4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" (UID: "4f3028a9-32d4-4ac0-ab8c-a2c1122d3959"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.035859 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-kube-api-access-qfmpm" (OuterVolumeSpecName: "kube-api-access-qfmpm") pod "4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" (UID: "4f3028a9-32d4-4ac0-ab8c-a2c1122d3959"). InnerVolumeSpecName "kube-api-access-qfmpm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.087145 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" (UID: "4f3028a9-32d4-4ac0-ab8c-a2c1122d3959"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.109778 4861 generic.go:334] "Generic (PLEG): container finished" podID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" containerID="e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a" exitCode=0 Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.109847 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-56d58" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.109845 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56d58" event={"ID":"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959","Type":"ContainerDied","Data":"e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a"} Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.110127 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-56d58" event={"ID":"4f3028a9-32d4-4ac0-ab8c-a2c1122d3959","Type":"ContainerDied","Data":"44339b2fabfe35fbbfba5e889f1f3a28560de3667b2fbc88462cbef85b8f4cee"} Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.110146 4861 scope.go:117] "RemoveContainer" containerID="e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.116483 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963" Jan 29 07:51:55 crc kubenswrapper[4861]: E0129 07:51:55.116782 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.121057 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfmpm\" (UniqueName: \"kubernetes.io/projected/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-kube-api-access-qfmpm\") on node \"crc\" DevicePath \"\"" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.121111 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.121131 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.135286 4861 scope.go:117] "RemoveContainer" containerID="2d301cb5572aaeaeaa99a8875b2991a2238a05bab398206c895bb8a0a1d09fcb" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.148036 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-56d58"] Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.153270 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-56d58"] Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.177343 4861 scope.go:117] "RemoveContainer" containerID="c677c323f2cf8278b03d35ccae881045800b2778b156187657db96b10619c047" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.201786 4861 scope.go:117] "RemoveContainer" containerID="e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a" Jan 29 07:51:55 crc kubenswrapper[4861]: E0129 07:51:55.202385 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a\": container with ID starting 
with e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a not found: ID does not exist" containerID="e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.202434 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a"} err="failed to get container status \"e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a\": rpc error: code = NotFound desc = could not find container \"e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a\": container with ID starting with e213b9224d591de87062421f6fdb83704fcac5345fff1b552e0f3fb0b8ea947a not found: ID does not exist" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.202466 4861 scope.go:117] "RemoveContainer" containerID="2d301cb5572aaeaeaa99a8875b2991a2238a05bab398206c895bb8a0a1d09fcb" Jan 29 07:51:55 crc kubenswrapper[4861]: E0129 07:51:55.202964 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d301cb5572aaeaeaa99a8875b2991a2238a05bab398206c895bb8a0a1d09fcb\": container with ID starting with 2d301cb5572aaeaeaa99a8875b2991a2238a05bab398206c895bb8a0a1d09fcb not found: ID does not exist" containerID="2d301cb5572aaeaeaa99a8875b2991a2238a05bab398206c895bb8a0a1d09fcb" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.203016 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d301cb5572aaeaeaa99a8875b2991a2238a05bab398206c895bb8a0a1d09fcb"} err="failed to get container status \"2d301cb5572aaeaeaa99a8875b2991a2238a05bab398206c895bb8a0a1d09fcb\": rpc error: code = NotFound desc = could not find container \"2d301cb5572aaeaeaa99a8875b2991a2238a05bab398206c895bb8a0a1d09fcb\": container with ID starting with 2d301cb5572aaeaeaa99a8875b2991a2238a05bab398206c895bb8a0a1d09fcb not found: ID does not exist" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.203052 4861 scope.go:117] "RemoveContainer" containerID="c677c323f2cf8278b03d35ccae881045800b2778b156187657db96b10619c047" Jan 29 07:51:55 crc kubenswrapper[4861]: E0129 07:51:55.203676 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c677c323f2cf8278b03d35ccae881045800b2778b156187657db96b10619c047\": container with ID starting with c677c323f2cf8278b03d35ccae881045800b2778b156187657db96b10619c047 not found: ID does not exist" containerID="c677c323f2cf8278b03d35ccae881045800b2778b156187657db96b10619c047" Jan 29 07:51:55 crc kubenswrapper[4861]: I0129 07:51:55.203745 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c677c323f2cf8278b03d35ccae881045800b2778b156187657db96b10619c047"} err="failed to get container status \"c677c323f2cf8278b03d35ccae881045800b2778b156187657db96b10619c047\": rpc error: code = NotFound desc = could not find container \"c677c323f2cf8278b03d35ccae881045800b2778b156187657db96b10619c047\": container with ID starting with c677c323f2cf8278b03d35ccae881045800b2778b156187657db96b10619c047 not found: ID does not exist" Jan 29 07:51:57 crc kubenswrapper[4861]: I0129 07:51:57.130130 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" path="/var/lib/kubelet/pods/4f3028a9-32d4-4ac0-ab8c-a2c1122d3959/volumes" Jan 29 07:52:08 crc kubenswrapper[4861]: I0129 
07:52:08.116904 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963" Jan 29 07:52:09 crc kubenswrapper[4861]: I0129 07:52:09.230288 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"9f6877e337631f1868c599d5c4b3a382e0ff7bf36ed5bf0ed6ba9714e754b1cf"} Jan 29 07:54:30 crc kubenswrapper[4861]: I0129 07:54:30.630148 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:54:30 crc kubenswrapper[4861]: I0129 07:54:30.630824 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:54:42 crc kubenswrapper[4861]: I0129 07:54:42.979838 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-8hmnw"] Jan 29 07:54:42 crc kubenswrapper[4861]: I0129 07:54:42.991133 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-8hmnw"] Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.093646 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-qlr4k"] Jan 29 07:54:43 crc kubenswrapper[4861]: E0129 07:54:43.094236 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b70f915-7801-4239-98d9-6e4806099e43" containerName="extract-utilities" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094275 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b70f915-7801-4239-98d9-6e4806099e43" containerName="extract-utilities" Jan 29 07:54:43 crc kubenswrapper[4861]: E0129 07:54:43.094322 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b70f915-7801-4239-98d9-6e4806099e43" containerName="registry-server" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094336 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b70f915-7801-4239-98d9-6e4806099e43" containerName="registry-server" Jan 29 07:54:43 crc kubenswrapper[4861]: E0129 07:54:43.094368 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8491c41c-285d-46c8-8c8a-a72348589912" containerName="extract-utilities" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094382 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8491c41c-285d-46c8-8c8a-a72348589912" containerName="extract-utilities" Jan 29 07:54:43 crc kubenswrapper[4861]: E0129 07:54:43.094402 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8491c41c-285d-46c8-8c8a-a72348589912" containerName="extract-content" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094415 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8491c41c-285d-46c8-8c8a-a72348589912" containerName="extract-content" Jan 29 07:54:43 crc kubenswrapper[4861]: E0129 07:54:43.094435 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8491c41c-285d-46c8-8c8a-a72348589912" containerName="registry-server" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094448 
4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8491c41c-285d-46c8-8c8a-a72348589912" containerName="registry-server" Jan 29 07:54:43 crc kubenswrapper[4861]: E0129 07:54:43.094471 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" containerName="extract-utilities" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094484 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" containerName="extract-utilities" Jan 29 07:54:43 crc kubenswrapper[4861]: E0129 07:54:43.094503 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b70f915-7801-4239-98d9-6e4806099e43" containerName="extract-content" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094517 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b70f915-7801-4239-98d9-6e4806099e43" containerName="extract-content" Jan 29 07:54:43 crc kubenswrapper[4861]: E0129 07:54:43.094540 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" containerName="extract-content" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094553 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" containerName="extract-content" Jan 29 07:54:43 crc kubenswrapper[4861]: E0129 07:54:43.094571 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" containerName="registry-server" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094584 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" containerName="registry-server" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094853 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f3028a9-32d4-4ac0-ab8c-a2c1122d3959" containerName="registry-server" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094877 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="8491c41c-285d-46c8-8c8a-a72348589912" containerName="registry-server" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.094901 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b70f915-7801-4239-98d9-6e4806099e43" containerName="registry-server" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.095843 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.099502 4861 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-9zmr6" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.099839 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.101793 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.102009 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.129454 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccc1ed57-3186-442c-8132-0f6a7fcc1526" path="/var/lib/kubelet/pods/ccc1ed57-3186-442c-8132-0f6a7fcc1526/volumes" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.130810 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-qlr4k"] Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.212760 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/526883c3-55b7-4dd6-96bf-d46673fb3072-node-mnt\") pod \"crc-storage-crc-qlr4k\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.212893 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbw48\" (UniqueName: \"kubernetes.io/projected/526883c3-55b7-4dd6-96bf-d46673fb3072-kube-api-access-rbw48\") pod \"crc-storage-crc-qlr4k\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.212919 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/526883c3-55b7-4dd6-96bf-d46673fb3072-crc-storage\") pod \"crc-storage-crc-qlr4k\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.314545 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbw48\" (UniqueName: \"kubernetes.io/projected/526883c3-55b7-4dd6-96bf-d46673fb3072-kube-api-access-rbw48\") pod \"crc-storage-crc-qlr4k\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.314593 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/526883c3-55b7-4dd6-96bf-d46673fb3072-crc-storage\") pod \"crc-storage-crc-qlr4k\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.314678 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/526883c3-55b7-4dd6-96bf-d46673fb3072-node-mnt\") pod \"crc-storage-crc-qlr4k\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.314937 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/526883c3-55b7-4dd6-96bf-d46673fb3072-node-mnt\") pod \"crc-storage-crc-qlr4k\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.315788 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/526883c3-55b7-4dd6-96bf-d46673fb3072-crc-storage\") pod \"crc-storage-crc-qlr4k\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.338840 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbw48\" (UniqueName: \"kubernetes.io/projected/526883c3-55b7-4dd6-96bf-d46673fb3072-kube-api-access-rbw48\") pod \"crc-storage-crc-qlr4k\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.418158 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:43 crc kubenswrapper[4861]: I0129 07:54:43.831259 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-qlr4k"] Jan 29 07:54:44 crc kubenswrapper[4861]: I0129 07:54:44.716229 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-qlr4k" event={"ID":"526883c3-55b7-4dd6-96bf-d46673fb3072","Type":"ContainerStarted","Data":"b963d92e9fec10bd5afebccf5d776dff5bf2b6686c9655ba9022641fd6bc8824"} Jan 29 07:54:44 crc kubenswrapper[4861]: I0129 07:54:44.716698 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-qlr4k" event={"ID":"526883c3-55b7-4dd6-96bf-d46673fb3072","Type":"ContainerStarted","Data":"10534aadde031d79de63c1adaf927cfd477d676ec08270ea16900da8d89e4046"} Jan 29 07:54:44 crc kubenswrapper[4861]: I0129 07:54:44.736162 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="crc-storage/crc-storage-crc-qlr4k" podStartSLOduration=1.18157635 podStartE2EDuration="1.736144169s" podCreationTimestamp="2026-01-29 07:54:43 +0000 UTC" firstStartedPulling="2026-01-29 07:54:43.848364282 +0000 UTC m=+4775.519858869" lastFinishedPulling="2026-01-29 07:54:44.402932081 +0000 UTC m=+4776.074426688" observedRunningTime="2026-01-29 07:54:44.731356753 +0000 UTC m=+4776.402851320" watchObservedRunningTime="2026-01-29 07:54:44.736144169 +0000 UTC m=+4776.407638726" Jan 29 07:54:45 crc kubenswrapper[4861]: I0129 07:54:45.727012 4861 generic.go:334] "Generic (PLEG): container finished" podID="526883c3-55b7-4dd6-96bf-d46673fb3072" containerID="b963d92e9fec10bd5afebccf5d776dff5bf2b6686c9655ba9022641fd6bc8824" exitCode=0 Jan 29 07:54:45 crc kubenswrapper[4861]: I0129 07:54:45.727103 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-qlr4k" event={"ID":"526883c3-55b7-4dd6-96bf-d46673fb3072","Type":"ContainerDied","Data":"b963d92e9fec10bd5afebccf5d776dff5bf2b6686c9655ba9022641fd6bc8824"} Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.348911 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.479762 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/526883c3-55b7-4dd6-96bf-d46673fb3072-node-mnt\") pod \"526883c3-55b7-4dd6-96bf-d46673fb3072\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.479899 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/526883c3-55b7-4dd6-96bf-d46673fb3072-crc-storage\") pod \"526883c3-55b7-4dd6-96bf-d46673fb3072\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.479923 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/526883c3-55b7-4dd6-96bf-d46673fb3072-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "526883c3-55b7-4dd6-96bf-d46673fb3072" (UID: "526883c3-55b7-4dd6-96bf-d46673fb3072"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.479953 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbw48\" (UniqueName: \"kubernetes.io/projected/526883c3-55b7-4dd6-96bf-d46673fb3072-kube-api-access-rbw48\") pod \"526883c3-55b7-4dd6-96bf-d46673fb3072\" (UID: \"526883c3-55b7-4dd6-96bf-d46673fb3072\") " Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.480556 4861 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/526883c3-55b7-4dd6-96bf-d46673fb3072-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.488478 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/526883c3-55b7-4dd6-96bf-d46673fb3072-kube-api-access-rbw48" (OuterVolumeSpecName: "kube-api-access-rbw48") pod "526883c3-55b7-4dd6-96bf-d46673fb3072" (UID: "526883c3-55b7-4dd6-96bf-d46673fb3072"). InnerVolumeSpecName "kube-api-access-rbw48". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.510891 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/526883c3-55b7-4dd6-96bf-d46673fb3072-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "526883c3-55b7-4dd6-96bf-d46673fb3072" (UID: "526883c3-55b7-4dd6-96bf-d46673fb3072"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.581902 4861 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/526883c3-55b7-4dd6-96bf-d46673fb3072-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.581942 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbw48\" (UniqueName: \"kubernetes.io/projected/526883c3-55b7-4dd6-96bf-d46673fb3072-kube-api-access-rbw48\") on node \"crc\" DevicePath \"\"" Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.746257 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-qlr4k" event={"ID":"526883c3-55b7-4dd6-96bf-d46673fb3072","Type":"ContainerDied","Data":"10534aadde031d79de63c1adaf927cfd477d676ec08270ea16900da8d89e4046"} Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.746305 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10534aadde031d79de63c1adaf927cfd477d676ec08270ea16900da8d89e4046" Jan 29 07:54:47 crc kubenswrapper[4861]: I0129 07:54:47.746878 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-qlr4k" Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.708996 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-qlr4k"] Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.719235 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-qlr4k"] Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.823695 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-x2h7z"] Jan 29 07:54:48 crc kubenswrapper[4861]: E0129 07:54:48.824940 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="526883c3-55b7-4dd6-96bf-d46673fb3072" containerName="storage" Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.824974 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="526883c3-55b7-4dd6-96bf-d46673fb3072" containerName="storage" Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.825302 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="526883c3-55b7-4dd6-96bf-d46673fb3072" containerName="storage" Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.826211 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.828970 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.829227 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.833001 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.833302 4861 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-9zmr6" Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.833364 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-x2h7z"] Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.898922 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpdtn\" (UniqueName: \"kubernetes.io/projected/2e596f2c-77ac-4bb4-b0ff-74418acc5612-kube-api-access-zpdtn\") pod \"crc-storage-crc-x2h7z\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.898986 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2e596f2c-77ac-4bb4-b0ff-74418acc5612-crc-storage\") pod \"crc-storage-crc-x2h7z\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:48 crc kubenswrapper[4861]: I0129 07:54:48.899420 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2e596f2c-77ac-4bb4-b0ff-74418acc5612-node-mnt\") pod \"crc-storage-crc-x2h7z\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:49 crc kubenswrapper[4861]: I0129 07:54:49.000410 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpdtn\" (UniqueName: \"kubernetes.io/projected/2e596f2c-77ac-4bb4-b0ff-74418acc5612-kube-api-access-zpdtn\") pod \"crc-storage-crc-x2h7z\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:49 crc kubenswrapper[4861]: I0129 07:54:49.000473 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2e596f2c-77ac-4bb4-b0ff-74418acc5612-crc-storage\") pod \"crc-storage-crc-x2h7z\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:49 crc kubenswrapper[4861]: I0129 07:54:49.000599 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2e596f2c-77ac-4bb4-b0ff-74418acc5612-node-mnt\") pod \"crc-storage-crc-x2h7z\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:49 crc kubenswrapper[4861]: I0129 07:54:49.000822 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2e596f2c-77ac-4bb4-b0ff-74418acc5612-node-mnt\") pod \"crc-storage-crc-x2h7z\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " 
pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:49 crc kubenswrapper[4861]: I0129 07:54:49.001423 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2e596f2c-77ac-4bb4-b0ff-74418acc5612-crc-storage\") pod \"crc-storage-crc-x2h7z\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:49 crc kubenswrapper[4861]: I0129 07:54:49.036437 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpdtn\" (UniqueName: \"kubernetes.io/projected/2e596f2c-77ac-4bb4-b0ff-74418acc5612-kube-api-access-zpdtn\") pod \"crc-storage-crc-x2h7z\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:49 crc kubenswrapper[4861]: I0129 07:54:49.132161 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="526883c3-55b7-4dd6-96bf-d46673fb3072" path="/var/lib/kubelet/pods/526883c3-55b7-4dd6-96bf-d46673fb3072/volumes" Jan 29 07:54:49 crc kubenswrapper[4861]: I0129 07:54:49.143687 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:49 crc kubenswrapper[4861]: I0129 07:54:49.429753 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-x2h7z"] Jan 29 07:54:49 crc kubenswrapper[4861]: I0129 07:54:49.765021 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-x2h7z" event={"ID":"2e596f2c-77ac-4bb4-b0ff-74418acc5612","Type":"ContainerStarted","Data":"8c146868d1bcb74d346324999d5695c892b484e853805916590c3cdfbcc280b0"} Jan 29 07:54:50 crc kubenswrapper[4861]: I0129 07:54:50.777882 4861 generic.go:334] "Generic (PLEG): container finished" podID="2e596f2c-77ac-4bb4-b0ff-74418acc5612" containerID="616c7cbdfd53b9c58bd28610fc7c6c89831d5cbfd2a2bf6b243de2eff7045b3b" exitCode=0 Jan 29 07:54:50 crc kubenswrapper[4861]: I0129 07:54:50.778043 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-x2h7z" event={"ID":"2e596f2c-77ac-4bb4-b0ff-74418acc5612","Type":"ContainerDied","Data":"616c7cbdfd53b9c58bd28610fc7c6c89831d5cbfd2a2bf6b243de2eff7045b3b"} Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.182566 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.248785 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2e596f2c-77ac-4bb4-b0ff-74418acc5612-node-mnt\") pod \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.248902 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2e596f2c-77ac-4bb4-b0ff-74418acc5612-crc-storage\") pod \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.248924 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e596f2c-77ac-4bb4-b0ff-74418acc5612-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "2e596f2c-77ac-4bb4-b0ff-74418acc5612" (UID: "2e596f2c-77ac-4bb4-b0ff-74418acc5612"). InnerVolumeSpecName "node-mnt". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.249015 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpdtn\" (UniqueName: \"kubernetes.io/projected/2e596f2c-77ac-4bb4-b0ff-74418acc5612-kube-api-access-zpdtn\") pod \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\" (UID: \"2e596f2c-77ac-4bb4-b0ff-74418acc5612\") " Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.249314 4861 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2e596f2c-77ac-4bb4-b0ff-74418acc5612-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.256487 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e596f2c-77ac-4bb4-b0ff-74418acc5612-kube-api-access-zpdtn" (OuterVolumeSpecName: "kube-api-access-zpdtn") pod "2e596f2c-77ac-4bb4-b0ff-74418acc5612" (UID: "2e596f2c-77ac-4bb4-b0ff-74418acc5612"). InnerVolumeSpecName "kube-api-access-zpdtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.268183 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e596f2c-77ac-4bb4-b0ff-74418acc5612-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "2e596f2c-77ac-4bb4-b0ff-74418acc5612" (UID: "2e596f2c-77ac-4bb4-b0ff-74418acc5612"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.350761 4861 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2e596f2c-77ac-4bb4-b0ff-74418acc5612-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.350810 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpdtn\" (UniqueName: \"kubernetes.io/projected/2e596f2c-77ac-4bb4-b0ff-74418acc5612-kube-api-access-zpdtn\") on node \"crc\" DevicePath \"\"" Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.793766 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-x2h7z" event={"ID":"2e596f2c-77ac-4bb4-b0ff-74418acc5612","Type":"ContainerDied","Data":"8c146868d1bcb74d346324999d5695c892b484e853805916590c3cdfbcc280b0"} Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.793820 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c146868d1bcb74d346324999d5695c892b484e853805916590c3cdfbcc280b0" Jan 29 07:54:52 crc kubenswrapper[4861]: I0129 07:54:52.793879 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-x2h7z" Jan 29 07:55:00 crc kubenswrapper[4861]: I0129 07:55:00.630398 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:55:00 crc kubenswrapper[4861]: I0129 07:55:00.631115 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:55:25 crc kubenswrapper[4861]: I0129 07:55:25.055940 4861 scope.go:117] "RemoveContainer" containerID="e2605ba51f074b0a77e2fdc954faacbeadf1b142896145745b9f4e66415a294a" Jan 29 07:55:30 crc kubenswrapper[4861]: I0129 07:55:30.629343 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:55:30 crc kubenswrapper[4861]: I0129 07:55:30.630130 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:55:30 crc kubenswrapper[4861]: I0129 07:55:30.630184 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 07:55:30 crc kubenswrapper[4861]: I0129 07:55:30.630858 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9f6877e337631f1868c599d5c4b3a382e0ff7bf36ed5bf0ed6ba9714e754b1cf"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 07:55:30 crc kubenswrapper[4861]: I0129 07:55:30.630930 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://9f6877e337631f1868c599d5c4b3a382e0ff7bf36ed5bf0ed6ba9714e754b1cf" gracePeriod=600 Jan 29 07:55:30 crc kubenswrapper[4861]: E0129 07:55:30.756782 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fc70726_e8f8_40d8_b31f_2853e3e856d7.slice/crio-9f6877e337631f1868c599d5c4b3a382e0ff7bf36ed5bf0ed6ba9714e754b1cf.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fc70726_e8f8_40d8_b31f_2853e3e856d7.slice/crio-conmon-9f6877e337631f1868c599d5c4b3a382e0ff7bf36ed5bf0ed6ba9714e754b1cf.scope\": RecentStats: unable to find data in memory cache]" Jan 29 07:55:31 crc kubenswrapper[4861]: I0129 07:55:31.159312 4861 generic.go:334] "Generic (PLEG): container finished" 
podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="9f6877e337631f1868c599d5c4b3a382e0ff7bf36ed5bf0ed6ba9714e754b1cf" exitCode=0 Jan 29 07:55:31 crc kubenswrapper[4861]: I0129 07:55:31.159410 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"9f6877e337631f1868c599d5c4b3a382e0ff7bf36ed5bf0ed6ba9714e754b1cf"} Jan 29 07:55:31 crc kubenswrapper[4861]: I0129 07:55:31.160647 4861 scope.go:117] "RemoveContainer" containerID="ab7c39d5cb86f350802da6fc0c62a56b74853543164ef375e502c08f6117a963" Jan 29 07:55:31 crc kubenswrapper[4861]: I0129 07:55:31.161002 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e"} Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.136062 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ljnq2"] Jan 29 07:56:03 crc kubenswrapper[4861]: E0129 07:56:03.136844 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e596f2c-77ac-4bb4-b0ff-74418acc5612" containerName="storage" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.136861 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e596f2c-77ac-4bb4-b0ff-74418acc5612" containerName="storage" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.137060 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e596f2c-77ac-4bb4-b0ff-74418acc5612" containerName="storage" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.138243 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.148147 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ljnq2"] Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.216866 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gchlt\" (UniqueName: \"kubernetes.io/projected/c8d75fec-e9c8-44ce-aa43-03c0c396462a-kube-api-access-gchlt\") pod \"redhat-operators-ljnq2\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.217001 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-utilities\") pod \"redhat-operators-ljnq2\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.217235 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-catalog-content\") pod \"redhat-operators-ljnq2\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.318685 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gchlt\" (UniqueName: \"kubernetes.io/projected/c8d75fec-e9c8-44ce-aa43-03c0c396462a-kube-api-access-gchlt\") pod \"redhat-operators-ljnq2\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.318777 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-utilities\") pod \"redhat-operators-ljnq2\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.318859 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-catalog-content\") pod \"redhat-operators-ljnq2\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.319447 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-catalog-content\") pod \"redhat-operators-ljnq2\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.319808 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-utilities\") pod \"redhat-operators-ljnq2\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.342426 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-gchlt\" (UniqueName: \"kubernetes.io/projected/c8d75fec-e9c8-44ce-aa43-03c0c396462a-kube-api-access-gchlt\") pod \"redhat-operators-ljnq2\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.464338 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:03 crc kubenswrapper[4861]: I0129 07:56:03.771499 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ljnq2"] Jan 29 07:56:04 crc kubenswrapper[4861]: I0129 07:56:04.492340 4861 generic.go:334] "Generic (PLEG): container finished" podID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerID="55a52dfe8793ec07d7460680f3c444e3b7d75969f8361531680ed447d373aeaa" exitCode=0 Jan 29 07:56:04 crc kubenswrapper[4861]: I0129 07:56:04.492398 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljnq2" event={"ID":"c8d75fec-e9c8-44ce-aa43-03c0c396462a","Type":"ContainerDied","Data":"55a52dfe8793ec07d7460680f3c444e3b7d75969f8361531680ed447d373aeaa"} Jan 29 07:56:04 crc kubenswrapper[4861]: I0129 07:56:04.492449 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljnq2" event={"ID":"c8d75fec-e9c8-44ce-aa43-03c0c396462a","Type":"ContainerStarted","Data":"59cadfc8473b0b236244497466c37869ad8a2a94b2ad1a2e0199906589dec194"} Jan 29 07:56:06 crc kubenswrapper[4861]: I0129 07:56:06.511782 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljnq2" event={"ID":"c8d75fec-e9c8-44ce-aa43-03c0c396462a","Type":"ContainerStarted","Data":"55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc"} Jan 29 07:56:07 crc kubenswrapper[4861]: I0129 07:56:07.531476 4861 generic.go:334] "Generic (PLEG): container finished" podID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerID="55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc" exitCode=0 Jan 29 07:56:07 crc kubenswrapper[4861]: I0129 07:56:07.531543 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljnq2" event={"ID":"c8d75fec-e9c8-44ce-aa43-03c0c396462a","Type":"ContainerDied","Data":"55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc"} Jan 29 07:56:08 crc kubenswrapper[4861]: I0129 07:56:08.543299 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljnq2" event={"ID":"c8d75fec-e9c8-44ce-aa43-03c0c396462a","Type":"ContainerStarted","Data":"5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad"} Jan 29 07:56:08 crc kubenswrapper[4861]: I0129 07:56:08.576692 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ljnq2" podStartSLOduration=1.913708632 podStartE2EDuration="5.576663086s" podCreationTimestamp="2026-01-29 07:56:03 +0000 UTC" firstStartedPulling="2026-01-29 07:56:04.493849469 +0000 UTC m=+4856.165344036" lastFinishedPulling="2026-01-29 07:56:08.156803933 +0000 UTC m=+4859.828298490" observedRunningTime="2026-01-29 07:56:08.567639319 +0000 UTC m=+4860.239133876" watchObservedRunningTime="2026-01-29 07:56:08.576663086 +0000 UTC m=+4860.248157683" Jan 29 07:56:13 crc kubenswrapper[4861]: I0129 07:56:13.465343 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 
07:56:13 crc kubenswrapper[4861]: I0129 07:56:13.466233 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:14 crc kubenswrapper[4861]: I0129 07:56:14.533926 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ljnq2" podUID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerName="registry-server" probeResult="failure" output=< Jan 29 07:56:14 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 07:56:14 crc kubenswrapper[4861]: > Jan 29 07:56:23 crc kubenswrapper[4861]: I0129 07:56:23.551105 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:23 crc kubenswrapper[4861]: I0129 07:56:23.630823 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:23 crc kubenswrapper[4861]: I0129 07:56:23.801620 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ljnq2"] Jan 29 07:56:24 crc kubenswrapper[4861]: I0129 07:56:24.680240 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ljnq2" podUID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerName="registry-server" containerID="cri-o://5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad" gracePeriod=2 Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.194284 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ljnq2" Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.264757 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gchlt\" (UniqueName: \"kubernetes.io/projected/c8d75fec-e9c8-44ce-aa43-03c0c396462a-kube-api-access-gchlt\") pod \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.264987 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-catalog-content\") pod \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.265021 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-utilities\") pod \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\" (UID: \"c8d75fec-e9c8-44ce-aa43-03c0c396462a\") " Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.271949 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8d75fec-e9c8-44ce-aa43-03c0c396462a-kube-api-access-gchlt" (OuterVolumeSpecName: "kube-api-access-gchlt") pod "c8d75fec-e9c8-44ce-aa43-03c0c396462a" (UID: "c8d75fec-e9c8-44ce-aa43-03c0c396462a"). InnerVolumeSpecName "kube-api-access-gchlt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.274750 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-utilities" (OuterVolumeSpecName: "utilities") pod "c8d75fec-e9c8-44ce-aa43-03c0c396462a" (UID: "c8d75fec-e9c8-44ce-aa43-03c0c396462a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.366776 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.366844 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gchlt\" (UniqueName: \"kubernetes.io/projected/c8d75fec-e9c8-44ce-aa43-03c0c396462a-kube-api-access-gchlt\") on node \"crc\" DevicePath \"\"" Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.454705 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c8d75fec-e9c8-44ce-aa43-03c0c396462a" (UID: "c8d75fec-e9c8-44ce-aa43-03c0c396462a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.468226 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8d75fec-e9c8-44ce-aa43-03c0c396462a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.690680 4861 generic.go:334] "Generic (PLEG): container finished" podID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerID="5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad" exitCode=0 Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.690776 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljnq2" event={"ID":"c8d75fec-e9c8-44ce-aa43-03c0c396462a","Type":"ContainerDied","Data":"5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad"} Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.690829 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ljnq2"
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.690866 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljnq2" event={"ID":"c8d75fec-e9c8-44ce-aa43-03c0c396462a","Type":"ContainerDied","Data":"59cadfc8473b0b236244497466c37869ad8a2a94b2ad1a2e0199906589dec194"}
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.690903 4861 scope.go:117] "RemoveContainer" containerID="5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad"
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.715416 4861 scope.go:117] "RemoveContainer" containerID="55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc"
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.754761 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ljnq2"]
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.760113 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ljnq2"]
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.762836 4861 scope.go:117] "RemoveContainer" containerID="55a52dfe8793ec07d7460680f3c444e3b7d75969f8361531680ed447d373aeaa"
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.805730 4861 scope.go:117] "RemoveContainer" containerID="5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad"
Jan 29 07:56:25 crc kubenswrapper[4861]: E0129 07:56:25.806414 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad\": container with ID starting with 5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad not found: ID does not exist" containerID="5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad"
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.806508 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad"} err="failed to get container status \"5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad\": rpc error: code = NotFound desc = could not find container \"5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad\": container with ID starting with 5be54c0038e2edec5cfb419a403362cf48d1293b91c6984b3eee1ed4a41232ad not found: ID does not exist"
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.806571 4861 scope.go:117] "RemoveContainer" containerID="55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc"
Jan 29 07:56:25 crc kubenswrapper[4861]: E0129 07:56:25.807133 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc\": container with ID starting with 55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc not found: ID does not exist" containerID="55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc"
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.807209 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc"} err="failed to get container status \"55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc\": rpc error: code = NotFound desc = could not find container \"55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc\": container with ID starting with 55abe0eb0c9832cabda6881ca3005048cf20748f544d5f1b2a3850aeb5f6e7fc not found: ID does not exist"
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.807256 4861 scope.go:117] "RemoveContainer" containerID="55a52dfe8793ec07d7460680f3c444e3b7d75969f8361531680ed447d373aeaa"
Jan 29 07:56:25 crc kubenswrapper[4861]: E0129 07:56:25.807675 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55a52dfe8793ec07d7460680f3c444e3b7d75969f8361531680ed447d373aeaa\": container with ID starting with 55a52dfe8793ec07d7460680f3c444e3b7d75969f8361531680ed447d373aeaa not found: ID does not exist" containerID="55a52dfe8793ec07d7460680f3c444e3b7d75969f8361531680ed447d373aeaa"
Jan 29 07:56:25 crc kubenswrapper[4861]: I0129 07:56:25.807749 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55a52dfe8793ec07d7460680f3c444e3b7d75969f8361531680ed447d373aeaa"} err="failed to get container status \"55a52dfe8793ec07d7460680f3c444e3b7d75969f8361531680ed447d373aeaa\": rpc error: code = NotFound desc = could not find container \"55a52dfe8793ec07d7460680f3c444e3b7d75969f8361531680ed447d373aeaa\": container with ID starting with 55a52dfe8793ec07d7460680f3c444e3b7d75969f8361531680ed447d373aeaa not found: ID does not exist"
Jan 29 07:56:27 crc kubenswrapper[4861]: I0129 07:56:27.126559 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" path="/var/lib/kubelet/pods/c8d75fec-e9c8-44ce-aa43-03c0c396462a/volumes"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.209329 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-wbqdd"]
Jan 29 07:56:53 crc kubenswrapper[4861]: E0129 07:56:53.210125 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerName="registry-server"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.210139 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerName="registry-server"
Jan 29 07:56:53 crc kubenswrapper[4861]: E0129 07:56:53.210158 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerName="extract-content"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.210164 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerName="extract-content"
Jan 29 07:56:53 crc kubenswrapper[4861]: E0129 07:56:53.210176 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerName="extract-utilities"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.210182 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerName="extract-utilities"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.210316 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8d75fec-e9c8-44ce-aa43-03c0c396462a" containerName="registry-server"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.211030 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.212827 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.213150 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.213171 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.215230 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-9mljh"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.218388 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-pdcxr"]
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.219935 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.221496 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.226211 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-wbqdd"]
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.244963 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-pdcxr"]
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.323880 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-847t8\" (UniqueName: \"kubernetes.io/projected/7d0421b8-8d9a-4412-8d21-cc166718ab71-kube-api-access-847t8\") pod \"dnsmasq-dns-5986db9b4f-wbqdd\" (UID: \"7d0421b8-8d9a-4412-8d21-cc166718ab71\") " pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.323985 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-dns-svc\") pod \"dnsmasq-dns-56bbd59dc5-pdcxr\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") " pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.324024 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d0421b8-8d9a-4412-8d21-cc166718ab71-config\") pod \"dnsmasq-dns-5986db9b4f-wbqdd\" (UID: \"7d0421b8-8d9a-4412-8d21-cc166718ab71\") " pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.324102 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sx5ml\" (UniqueName: \"kubernetes.io/projected/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-kube-api-access-sx5ml\") pod \"dnsmasq-dns-56bbd59dc5-pdcxr\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") " pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.324139 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-config\") pod \"dnsmasq-dns-56bbd59dc5-pdcxr\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") " pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.397175 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-pdcxr"]
Jan 29 07:56:53 crc kubenswrapper[4861]: E0129 07:56:53.397607 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-sx5ml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr" podUID="9dcca8fe-38da-4ee3-a291-f88bb5a1cb87"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.422570 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95587bc99-brqk5"]
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.425483 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95587bc99-brqk5"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.425883 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sx5ml\" (UniqueName: \"kubernetes.io/projected/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-kube-api-access-sx5ml\") pod \"dnsmasq-dns-56bbd59dc5-pdcxr\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") " pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.425932 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-config\") pod \"dnsmasq-dns-56bbd59dc5-pdcxr\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") " pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.426011 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-847t8\" (UniqueName: \"kubernetes.io/projected/7d0421b8-8d9a-4412-8d21-cc166718ab71-kube-api-access-847t8\") pod \"dnsmasq-dns-5986db9b4f-wbqdd\" (UID: \"7d0421b8-8d9a-4412-8d21-cc166718ab71\") " pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.426037 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-dns-svc\") pod \"dnsmasq-dns-56bbd59dc5-pdcxr\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") " pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.426082 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d0421b8-8d9a-4412-8d21-cc166718ab71-config\") pod \"dnsmasq-dns-5986db9b4f-wbqdd\" (UID: \"7d0421b8-8d9a-4412-8d21-cc166718ab71\") " pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.426857 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d0421b8-8d9a-4412-8d21-cc166718ab71-config\") pod \"dnsmasq-dns-5986db9b4f-wbqdd\" (UID: \"7d0421b8-8d9a-4412-8d21-cc166718ab71\") " pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.427052 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-config\") pod \"dnsmasq-dns-56bbd59dc5-pdcxr\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") " pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.429822 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-dns-svc\") pod \"dnsmasq-dns-56bbd59dc5-pdcxr\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") " pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.435101 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-brqk5"]
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.448736 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sx5ml\" (UniqueName: \"kubernetes.io/projected/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-kube-api-access-sx5ml\") pod \"dnsmasq-dns-56bbd59dc5-pdcxr\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") " pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.471712 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-847t8\" (UniqueName: \"kubernetes.io/projected/7d0421b8-8d9a-4412-8d21-cc166718ab71-kube-api-access-847t8\") pod \"dnsmasq-dns-5986db9b4f-wbqdd\" (UID: \"7d0421b8-8d9a-4412-8d21-cc166718ab71\") " pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.527008 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppsfj\" (UniqueName: \"kubernetes.io/projected/d4fbe1c9-5cf9-4716-8367-e003d526fe53-kube-api-access-ppsfj\") pod \"dnsmasq-dns-95587bc99-brqk5\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " pod="openstack/dnsmasq-dns-95587bc99-brqk5"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.527057 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-config\") pod \"dnsmasq-dns-95587bc99-brqk5\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " pod="openstack/dnsmasq-dns-95587bc99-brqk5"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.527135 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-dns-svc\") pod \"dnsmasq-dns-95587bc99-brqk5\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " pod="openstack/dnsmasq-dns-95587bc99-brqk5"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.535884 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.636484 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppsfj\" (UniqueName: \"kubernetes.io/projected/d4fbe1c9-5cf9-4716-8367-e003d526fe53-kube-api-access-ppsfj\") pod \"dnsmasq-dns-95587bc99-brqk5\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " pod="openstack/dnsmasq-dns-95587bc99-brqk5"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.636781 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-config\") pod \"dnsmasq-dns-95587bc99-brqk5\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " pod="openstack/dnsmasq-dns-95587bc99-brqk5"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.636823 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-dns-svc\") pod \"dnsmasq-dns-95587bc99-brqk5\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " pod="openstack/dnsmasq-dns-95587bc99-brqk5"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.637736 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-dns-svc\") pod \"dnsmasq-dns-95587bc99-brqk5\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " pod="openstack/dnsmasq-dns-95587bc99-brqk5"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.638009 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-config\") pod \"dnsmasq-dns-95587bc99-brqk5\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " pod="openstack/dnsmasq-dns-95587bc99-brqk5"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.659331 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppsfj\" (UniqueName: \"kubernetes.io/projected/d4fbe1c9-5cf9-4716-8367-e003d526fe53-kube-api-access-ppsfj\") pod \"dnsmasq-dns-95587bc99-brqk5\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " pod="openstack/dnsmasq-dns-95587bc99-brqk5"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.741715 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95587bc99-brqk5"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.801401 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-wbqdd"]
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.833773 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-4t9r7"]
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.835232 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.845982 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-wbqdd"]
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.856772 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-4t9r7"]
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.931247 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd" event={"ID":"7d0421b8-8d9a-4412-8d21-cc166718ab71","Type":"ContainerStarted","Data":"d927a4433b428dc5c4a10f347aab04a1b997f1d82fdcaa3dc065eea49d2c933a"}
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.931305 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.941614 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-4t9r7\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") " pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.941668 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-config\") pod \"dnsmasq-dns-5d79f765b5-4t9r7\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") " pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.941742 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dkth\" (UniqueName: \"kubernetes.io/projected/79aab840-0e43-4ef0-8fba-793c022f49c4-kube-api-access-5dkth\") pod \"dnsmasq-dns-5d79f765b5-4t9r7\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") " pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:53 crc kubenswrapper[4861]: I0129 07:56:53.942763 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.042772 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sx5ml\" (UniqueName: \"kubernetes.io/projected/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-kube-api-access-sx5ml\") pod \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") "
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.043293 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-config\") pod \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") "
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.043425 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-dns-svc\") pod \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\" (UID: \"9dcca8fe-38da-4ee3-a291-f88bb5a1cb87\") "
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.043618 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-4t9r7\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") " pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.043653 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-config\") pod \"dnsmasq-dns-5d79f765b5-4t9r7\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") " pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.043749 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dkth\" (UniqueName: \"kubernetes.io/projected/79aab840-0e43-4ef0-8fba-793c022f49c4-kube-api-access-5dkth\") pod \"dnsmasq-dns-5d79f765b5-4t9r7\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") " pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.044143 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-config" (OuterVolumeSpecName: "config") pod "9dcca8fe-38da-4ee3-a291-f88bb5a1cb87" (UID: "9dcca8fe-38da-4ee3-a291-f88bb5a1cb87"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.044402 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9dcca8fe-38da-4ee3-a291-f88bb5a1cb87" (UID: "9dcca8fe-38da-4ee3-a291-f88bb5a1cb87"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.044706 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-4t9r7\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") " pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.044739 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-config\") pod \"dnsmasq-dns-5d79f765b5-4t9r7\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") " pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.055490 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-kube-api-access-sx5ml" (OuterVolumeSpecName: "kube-api-access-sx5ml") pod "9dcca8fe-38da-4ee3-a291-f88bb5a1cb87" (UID: "9dcca8fe-38da-4ee3-a291-f88bb5a1cb87"). InnerVolumeSpecName "kube-api-access-sx5ml". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.060579 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dkth\" (UniqueName: \"kubernetes.io/projected/79aab840-0e43-4ef0-8fba-793c022f49c4-kube-api-access-5dkth\") pod \"dnsmasq-dns-5d79f765b5-4t9r7\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") " pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.145303 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.145347 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sx5ml\" (UniqueName: \"kubernetes.io/projected/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-kube-api-access-sx5ml\") on node \"crc\" DevicePath \"\""
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.145363 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87-config\") on node \"crc\" DevicePath \"\""
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.153875 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.269310 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-brqk5"]
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.602014 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.605722 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.611640 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.611997 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.612319 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.612531 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.612714 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.613067 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mctnn"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.613062 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.615407 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.672180 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-4t9r7"]
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.759790 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.760092 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/285529a3-bf50-4815-b38e-e95a0d291fb6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.760242 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.760379 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-config-data\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.760476 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.763298 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.763820 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.763900 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.763940 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/285529a3-bf50-4815-b38e-e95a0d291fb6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.764060 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jgf7\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-kube-api-access-2jgf7\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.764120 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.865248 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-config-data\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.865304 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.865351 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.865375 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.867189 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.867720 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/285529a3-bf50-4815-b38e-e95a0d291fb6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.867118 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.866445 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.866614 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-config-data\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.867761 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jgf7\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-kube-api-access-2jgf7\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.867870 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.868514 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.868562 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.868593 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/285529a3-bf50-4815-b38e-e95a0d291fb6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.869131 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.869872 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.871959 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/285529a3-bf50-4815-b38e-e95a0d291fb6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.875502 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.875967 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.876041 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/84fb7c1aa0e6c235ee3373db0d2c7415ab4d1e3824fb691c7d47a18ee7d0cead/globalmount\"" pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.876336 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.881745 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/285529a3-bf50-4815-b38e-e95a0d291fb6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.891357 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jgf7\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-kube-api-access-2jgf7\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.915338 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\") pod \"rabbitmq-server-0\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.933697 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.946728 4861 generic.go:334] "Generic (PLEG): container finished" podID="d4fbe1c9-5cf9-4716-8367-e003d526fe53" containerID="eefbf6b5e0b61625bbd9a7cbf57695e9b39c526fa41d106d439599022f5c83ee" exitCode=0
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.946841 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-brqk5" event={"ID":"d4fbe1c9-5cf9-4716-8367-e003d526fe53","Type":"ContainerDied","Data":"eefbf6b5e0b61625bbd9a7cbf57695e9b39c526fa41d106d439599022f5c83ee"}
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.946876 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-brqk5" event={"ID":"d4fbe1c9-5cf9-4716-8367-e003d526fe53","Type":"ContainerStarted","Data":"1c547391ce0ed8b24408d7d41b2e77634f78a027cca5934d2588ea04e1b630d9"}
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.951675 4861 generic.go:334] "Generic (PLEG): container finished" podID="79aab840-0e43-4ef0-8fba-793c022f49c4" containerID="75f4904c6c16510c59b8908e665a078cdcb432cf08bee83aa03b3f0561d6dd8a" exitCode=0
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.951771 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7" event={"ID":"79aab840-0e43-4ef0-8fba-793c022f49c4","Type":"ContainerDied","Data":"75f4904c6c16510c59b8908e665a078cdcb432cf08bee83aa03b3f0561d6dd8a"}
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.951811 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7" event={"ID":"79aab840-0e43-4ef0-8fba-793c022f49c4","Type":"ContainerStarted","Data":"79f02e91a31e6dc67ad3baad4506483cebce6368b5ba092bf85108c72a67bdc1"}
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.967566 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d0421b8-8d9a-4412-8d21-cc166718ab71" containerID="10f1d53619563782490e71f1931637bbb75ab7e8867cd9b314d9a9eea79b4545" exitCode=0
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.967677 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-pdcxr"
Jan 29 07:56:54 crc kubenswrapper[4861]: I0129 07:56:54.969190 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd" event={"ID":"7d0421b8-8d9a-4412-8d21-cc166718ab71","Type":"ContainerDied","Data":"10f1d53619563782490e71f1931637bbb75ab7e8867cd9b314d9a9eea79b4545"}
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.060778 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.099178 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.099338 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.102701 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.102764 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.102982 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.103065 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-6zc8k"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.103310 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.103585 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.104206 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.143705 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-pdcxr"]
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.146675 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-pdcxr"]
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.190938 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.190999 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.191018 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.191049 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.191064 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4e61fc48-3390-4b35-956f-843772ead36e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.191094 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.191114 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jmp6\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-kube-api-access-7jmp6\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.191137 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.191170 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.191212 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.191239 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4e61fc48-3390-4b35-956f-843772ead36e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: E0129 07:56:55.200110 4861 log.go:32] "CreateContainer in sandbox from runtime service failed" err=<
Jan 29 07:56:55 crc kubenswrapper[4861]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/d4fbe1c9-5cf9-4716-8367-e003d526fe53/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Jan 29 07:56:55 crc kubenswrapper[4861]: > podSandboxID="1c547391ce0ed8b24408d7d41b2e77634f78a027cca5934d2588ea04e1b630d9"
Jan 29 07:56:55 crc kubenswrapper[4861]: E0129 07:56:55.200254 4861 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 29 07:56:55 crc kubenswrapper[4861]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n8chc6h5bh56fh546hb7hc8h67h5bchffh577h697h5b5h5bdh59bhf6hf4h558hb5h578h595h5cchfbh644h59ch7fh654h547h587h5cbh5d5h8fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ppsfj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-95587bc99-brqk5_openstack(d4fbe1c9-5cf9-4716-8367-e003d526fe53): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/d4fbe1c9-5cf9-4716-8367-e003d526fe53/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Jan 29 07:56:55 crc kubenswrapper[4861]: > logger="UnhandledError"
Jan 29 07:56:55 crc kubenswrapper[4861]: E0129 07:56:55.201890 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/d4fbe1c9-5cf9-4716-8367-e003d526fe53/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-95587bc99-brqk5" podUID="d4fbe1c9-5cf9-4716-8367-e003d526fe53"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.273852 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.292430 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.292483 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.292525 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.292551 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4e61fc48-3390-4b35-956f-843772ead36e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.292578 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.292606 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.292622 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.292649 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.292691 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4e61fc48-3390-4b35-956f-843772ead36e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.292705 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.292720 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jmp6\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-kube-api-access-7jmp6\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.293504 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.294644 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.294890 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.295346 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.296884 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.300713 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.301563 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4e61fc48-3390-4b35-956f-843772ead36e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.302585 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.302618 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9d6c6648f58821c9c66f6b3e45e2571ce0b1381467c7b67dc15dc959d23e85e3/globalmount\"" pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.303818 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.308113 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4e61fc48-3390-4b35-956f-843772ead36e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.313456 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jmp6\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-kube-api-access-7jmp6\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.327653 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.393852 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d0421b8-8d9a-4412-8d21-cc166718ab71-config\") pod \"7d0421b8-8d9a-4412-8d21-cc166718ab71\" (UID: \"7d0421b8-8d9a-4412-8d21-cc166718ab71\") "
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.394217 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-847t8\" (UniqueName: \"kubernetes.io/projected/7d0421b8-8d9a-4412-8d21-cc166718ab71-kube-api-access-847t8\") pod \"7d0421b8-8d9a-4412-8d21-cc166718ab71\" (UID: \"7d0421b8-8d9a-4412-8d21-cc166718ab71\") "
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.397648 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d0421b8-8d9a-4412-8d21-cc166718ab71-kube-api-access-847t8" (OuterVolumeSpecName: "kube-api-access-847t8") pod "7d0421b8-8d9a-4412-8d21-cc166718ab71" (UID: "7d0421b8-8d9a-4412-8d21-cc166718ab71"). InnerVolumeSpecName "kube-api-access-847t8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.418356 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d0421b8-8d9a-4412-8d21-cc166718ab71-config" (OuterVolumeSpecName: "config") pod "7d0421b8-8d9a-4412-8d21-cc166718ab71" (UID: "7d0421b8-8d9a-4412-8d21-cc166718ab71"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.454121 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.497191 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-847t8\" (UniqueName: \"kubernetes.io/projected/7d0421b8-8d9a-4412-8d21-cc166718ab71-kube-api-access-847t8\") on node \"crc\" DevicePath \"\""
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.497260 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d0421b8-8d9a-4412-8d21-cc166718ab71-config\") on node \"crc\" DevicePath \"\""
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.526559 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.906901 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Jan 29 07:56:55 crc kubenswrapper[4861]: E0129 07:56:55.907716 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d0421b8-8d9a-4412-8d21-cc166718ab71" containerName="init"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.907765 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d0421b8-8d9a-4412-8d21-cc166718ab71" containerName="init"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.908408 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d0421b8-8d9a-4412-8d21-cc166718ab71" containerName="init"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.910569 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.915219 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.915551 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-rjt2w"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.919133 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.921420 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.925885 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.936802 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.968429 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 29 07:56:55 crc kubenswrapper[4861]: W0129 07:56:55.974123 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e61fc48_3390_4b35_956f_843772ead36e.slice/crio-773a810ec46a719bcaeb10d3633c6dbf023f2255e741a43926df8fa419cfaf9c WatchSource:0}: Error finding container 773a810ec46a719bcaeb10d3633c6dbf023f2255e741a43926df8fa419cfaf9c: Status 404 returned error can't find the container with id 773a810ec46a719bcaeb10d3633c6dbf023f2255e741a43926df8fa419cfaf9c
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.975484 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"285529a3-bf50-4815-b38e-e95a0d291fb6","Type":"ContainerStarted","Data":"9f853570ace0d21bfd4e6ad1d0dac66e2f387e30a3859e4b6b8316b33fc78444"}
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.978509 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7" event={"ID":"79aab840-0e43-4ef0-8fba-793c022f49c4","Type":"ContainerStarted","Data":"550adb00fd16ef1661c7027e2d9877ac2284c73204d6853f5676f9f138e634d8"}
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.979352 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.982109 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd" Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.987799 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5986db9b4f-wbqdd" event={"ID":"7d0421b8-8d9a-4412-8d21-cc166718ab71","Type":"ContainerDied","Data":"d927a4433b428dc5c4a10f347aab04a1b997f1d82fdcaa3dc065eea49d2c933a"} Jan 29 07:56:55 crc kubenswrapper[4861]: I0129 07:56:55.987982 4861 scope.go:117] "RemoveContainer" containerID="10f1d53619563782490e71f1931637bbb75ab7e8867cd9b314d9a9eea79b4545" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.005500 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca4fde45-47fb-44fa-baea-904bfec6b6e8-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.005979 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca4fde45-47fb-44fa-baea-904bfec6b6e8-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.006001 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ca4fde45-47fb-44fa-baea-904bfec6b6e8-config-data-default\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.006039 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8chn\" (UniqueName: \"kubernetes.io/projected/ca4fde45-47fb-44fa-baea-904bfec6b6e8-kube-api-access-c8chn\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.006092 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4ccc747a-65ed-47f7-a6a3-fa37fb4e5897\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4ccc747a-65ed-47f7-a6a3-fa37fb4e5897\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.006142 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ca4fde45-47fb-44fa-baea-904bfec6b6e8-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.006175 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca4fde45-47fb-44fa-baea-904bfec6b6e8-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.006210 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" 
(UniqueName: \"kubernetes.io/configmap/ca4fde45-47fb-44fa-baea-904bfec6b6e8-kolla-config\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.008893 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7" podStartSLOduration=3.00887902 podStartE2EDuration="3.00887902s" podCreationTimestamp="2026-01-29 07:56:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 07:56:56.001774264 +0000 UTC m=+4907.673268851" watchObservedRunningTime="2026-01-29 07:56:56.00887902 +0000 UTC m=+4907.680373587" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.088213 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-wbqdd"] Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.105555 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-wbqdd"] Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.107786 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ca4fde45-47fb-44fa-baea-904bfec6b6e8-kolla-config\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.107842 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca4fde45-47fb-44fa-baea-904bfec6b6e8-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.107900 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ca4fde45-47fb-44fa-baea-904bfec6b6e8-config-data-default\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.107926 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca4fde45-47fb-44fa-baea-904bfec6b6e8-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.107997 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8chn\" (UniqueName: \"kubernetes.io/projected/ca4fde45-47fb-44fa-baea-904bfec6b6e8-kube-api-access-c8chn\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.108047 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4ccc747a-65ed-47f7-a6a3-fa37fb4e5897\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4ccc747a-65ed-47f7-a6a3-fa37fb4e5897\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.108129 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" 
(UniqueName: \"kubernetes.io/empty-dir/ca4fde45-47fb-44fa-baea-904bfec6b6e8-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.108166 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca4fde45-47fb-44fa-baea-904bfec6b6e8-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.109598 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ca4fde45-47fb-44fa-baea-904bfec6b6e8-kolla-config\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.111125 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ca4fde45-47fb-44fa-baea-904bfec6b6e8-config-data-default\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.112130 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca4fde45-47fb-44fa-baea-904bfec6b6e8-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.112766 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ca4fde45-47fb-44fa-baea-904bfec6b6e8-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.123039 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca4fde45-47fb-44fa-baea-904bfec6b6e8-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.131424 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca4fde45-47fb-44fa-baea-904bfec6b6e8-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.148161 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8chn\" (UniqueName: \"kubernetes.io/projected/ca4fde45-47fb-44fa-baea-904bfec6b6e8-kube-api-access-c8chn\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.166980 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.167029 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4ccc747a-65ed-47f7-a6a3-fa37fb4e5897\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4ccc747a-65ed-47f7-a6a3-fa37fb4e5897\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1709e44f06b1e5814a502d26faaa7dc186cf2e60cf39745f0cb5fc97e3d8ceb3/globalmount\"" pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.287554 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4ccc747a-65ed-47f7-a6a3-fa37fb4e5897\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4ccc747a-65ed-47f7-a6a3-fa37fb4e5897\") pod \"openstack-galera-0\" (UID: \"ca4fde45-47fb-44fa-baea-904bfec6b6e8\") " pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.540622 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.978943 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 07:56:56 crc kubenswrapper[4861]: W0129 07:56:56.979218 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca4fde45_47fb_44fa_baea_904bfec6b6e8.slice/crio-4f6b60f70f448f41d90cd5f325bf0496eb2b9f0adb409765e879ded68de5e028 WatchSource:0}: Error finding container 4f6b60f70f448f41d90cd5f325bf0496eb2b9f0adb409765e879ded68de5e028: Status 404 returned error can't find the container with id 4f6b60f70f448f41d90cd5f325bf0496eb2b9f0adb409765e879ded68de5e028 Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.990237 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4e61fc48-3390-4b35-956f-843772ead36e","Type":"ContainerStarted","Data":"773a810ec46a719bcaeb10d3633c6dbf023f2255e741a43926df8fa419cfaf9c"} Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.992900 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ca4fde45-47fb-44fa-baea-904bfec6b6e8","Type":"ContainerStarted","Data":"4f6b60f70f448f41d90cd5f325bf0496eb2b9f0adb409765e879ded68de5e028"} Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.995222 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-brqk5" event={"ID":"d4fbe1c9-5cf9-4716-8367-e003d526fe53","Type":"ContainerStarted","Data":"1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912"} Jan 29 07:56:56 crc kubenswrapper[4861]: I0129 07:56:56.995431 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-95587bc99-brqk5" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.018087 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-95587bc99-brqk5" podStartSLOduration=4.018056855 podStartE2EDuration="4.018056855s" podCreationTimestamp="2026-01-29 07:56:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 07:56:57.010058805 +0000 UTC m=+4908.681553422" watchObservedRunningTime="2026-01-29 07:56:57.018056855 +0000 UTC m=+4908.689551412" Jan 29 07:56:57 crc 
kubenswrapper[4861]: I0129 07:56:57.129798 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d0421b8-8d9a-4412-8d21-cc166718ab71" path="/var/lib/kubelet/pods/7d0421b8-8d9a-4412-8d21-cc166718ab71/volumes" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.130392 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dcca8fe-38da-4ee3-a291-f88bb5a1cb87" path="/var/lib/kubelet/pods/9dcca8fe-38da-4ee3-a291-f88bb5a1cb87/volumes" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.566924 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.568383 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.570554 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-tz5bp" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.570593 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.572358 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.572711 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.595444 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.636598 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da8e0ee-9c1a-4557-a727-14de15187b68-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.636731 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da8e0ee-9c1a-4557-a727-14de15187b68-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.636835 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d2060a89-eccd-4f68-b32a-58bd43e0af0d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d2060a89-eccd-4f68-b32a-58bd43e0af0d\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.636881 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tm5jn\" (UniqueName: \"kubernetes.io/projected/3da8e0ee-9c1a-4557-a727-14de15187b68-kube-api-access-tm5jn\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.636915 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/3da8e0ee-9c1a-4557-a727-14de15187b68-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.636951 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3da8e0ee-9c1a-4557-a727-14de15187b68-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.637319 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3da8e0ee-9c1a-4557-a727-14de15187b68-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.637424 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3da8e0ee-9c1a-4557-a727-14de15187b68-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.643266 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.644383 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.646555 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.646736 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.646864 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-qjwgd" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.661624 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738551 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da8e0ee-9c1a-4557-a727-14de15187b68-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738594 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1079f778-4a50-46fe-a07a-5a7059b2865d-config-data\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738641 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da8e0ee-9c1a-4557-a727-14de15187b68-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc 
kubenswrapper[4861]: I0129 07:56:57.738664 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1079f778-4a50-46fe-a07a-5a7059b2865d-combined-ca-bundle\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738777 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d2060a89-eccd-4f68-b32a-58bd43e0af0d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d2060a89-eccd-4f68-b32a-58bd43e0af0d\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738834 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tm5jn\" (UniqueName: \"kubernetes.io/projected/3da8e0ee-9c1a-4557-a727-14de15187b68-kube-api-access-tm5jn\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738853 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3da8e0ee-9c1a-4557-a727-14de15187b68-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738873 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3da8e0ee-9c1a-4557-a727-14de15187b68-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738891 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1079f778-4a50-46fe-a07a-5a7059b2865d-kolla-config\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738927 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3da8e0ee-9c1a-4557-a727-14de15187b68-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738949 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3da8e0ee-9c1a-4557-a727-14de15187b68-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738969 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njxvn\" (UniqueName: \"kubernetes.io/projected/1079f778-4a50-46fe-a07a-5a7059b2865d-kube-api-access-njxvn\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.738991 4861 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/1079f778-4a50-46fe-a07a-5a7059b2865d-memcached-tls-certs\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.739571 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3da8e0ee-9c1a-4557-a727-14de15187b68-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.740054 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3da8e0ee-9c1a-4557-a727-14de15187b68-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.740229 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da8e0ee-9c1a-4557-a727-14de15187b68-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.740236 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3da8e0ee-9c1a-4557-a727-14de15187b68-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.741739 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.741767 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d2060a89-eccd-4f68-b32a-58bd43e0af0d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d2060a89-eccd-4f68-b32a-58bd43e0af0d\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/bb55dfba1b6279fd3ca98a8a28a1b290553df74f8420b078faee852d8481ed8f/globalmount\"" pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.743099 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3da8e0ee-9c1a-4557-a727-14de15187b68-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.743367 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da8e0ee-9c1a-4557-a727-14de15187b68-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.761326 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tm5jn\" (UniqueName: \"kubernetes.io/projected/3da8e0ee-9c1a-4557-a727-14de15187b68-kube-api-access-tm5jn\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.777213 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d2060a89-eccd-4f68-b32a-58bd43e0af0d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d2060a89-eccd-4f68-b32a-58bd43e0af0d\") pod \"openstack-cell1-galera-0\" (UID: \"3da8e0ee-9c1a-4557-a727-14de15187b68\") " pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.830394 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.840103 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1079f778-4a50-46fe-a07a-5a7059b2865d-config-data\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.840186 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1079f778-4a50-46fe-a07a-5a7059b2865d-combined-ca-bundle\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.840227 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1079f778-4a50-46fe-a07a-5a7059b2865d-kolla-config\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.840278 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njxvn\" (UniqueName: \"kubernetes.io/projected/1079f778-4a50-46fe-a07a-5a7059b2865d-kube-api-access-njxvn\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.840303 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/1079f778-4a50-46fe-a07a-5a7059b2865d-memcached-tls-certs\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.841186 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1079f778-4a50-46fe-a07a-5a7059b2865d-kolla-config\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.842306 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1079f778-4a50-46fe-a07a-5a7059b2865d-config-data\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.845509 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1079f778-4a50-46fe-a07a-5a7059b2865d-combined-ca-bundle\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.846840 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/1079f778-4a50-46fe-a07a-5a7059b2865d-memcached-tls-certs\") pod \"memcached-0\" (UID: \"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:57 crc kubenswrapper[4861]: I0129 07:56:57.866696 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njxvn\" (UniqueName: \"kubernetes.io/projected/1079f778-4a50-46fe-a07a-5a7059b2865d-kube-api-access-njxvn\") pod \"memcached-0\" (UID: 
\"1079f778-4a50-46fe-a07a-5a7059b2865d\") " pod="openstack/memcached-0" Jan 29 07:56:58 crc kubenswrapper[4861]: I0129 07:56:58.006540 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"285529a3-bf50-4815-b38e-e95a0d291fb6","Type":"ContainerStarted","Data":"ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f"} Jan 29 07:56:58 crc kubenswrapper[4861]: I0129 07:56:58.009027 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4e61fc48-3390-4b35-956f-843772ead36e","Type":"ContainerStarted","Data":"968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c"} Jan 29 07:56:58 crc kubenswrapper[4861]: I0129 07:56:58.011211 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ca4fde45-47fb-44fa-baea-904bfec6b6e8","Type":"ContainerStarted","Data":"80d99cf917c54379aadfa8da25479a0f5b2e4f96a06bcf51949bc386b9d1251a"} Jan 29 07:56:58 crc kubenswrapper[4861]: I0129 07:56:58.139888 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 29 07:56:58 crc kubenswrapper[4861]: I0129 07:56:58.329606 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 07:56:58 crc kubenswrapper[4861]: W0129 07:56:58.332625 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3da8e0ee_9c1a_4557_a727_14de15187b68.slice/crio-bcf87f7cddf2f0d007e9bf9627ca2862f78872b9947ff2b4d40bd685d3873160 WatchSource:0}: Error finding container bcf87f7cddf2f0d007e9bf9627ca2862f78872b9947ff2b4d40bd685d3873160: Status 404 returned error can't find the container with id bcf87f7cddf2f0d007e9bf9627ca2862f78872b9947ff2b4d40bd685d3873160 Jan 29 07:56:58 crc kubenswrapper[4861]: I0129 07:56:58.593843 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 29 07:56:58 crc kubenswrapper[4861]: W0129 07:56:58.600263 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1079f778_4a50_46fe_a07a_5a7059b2865d.slice/crio-2e0feb7dc7121df0d01665eb97769f63c1ef00b1037b0a2392c1598c31b292f8 WatchSource:0}: Error finding container 2e0feb7dc7121df0d01665eb97769f63c1ef00b1037b0a2392c1598c31b292f8: Status 404 returned error can't find the container with id 2e0feb7dc7121df0d01665eb97769f63c1ef00b1037b0a2392c1598c31b292f8 Jan 29 07:56:59 crc kubenswrapper[4861]: I0129 07:56:59.021135 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3da8e0ee-9c1a-4557-a727-14de15187b68","Type":"ContainerStarted","Data":"c1911c7d958f05f4526a0518b9e5cd597624173a749629670de4150424346f9f"} Jan 29 07:56:59 crc kubenswrapper[4861]: I0129 07:56:59.021691 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3da8e0ee-9c1a-4557-a727-14de15187b68","Type":"ContainerStarted","Data":"bcf87f7cddf2f0d007e9bf9627ca2862f78872b9947ff2b4d40bd685d3873160"} Jan 29 07:56:59 crc kubenswrapper[4861]: I0129 07:56:59.023725 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"1079f778-4a50-46fe-a07a-5a7059b2865d","Type":"ContainerStarted","Data":"277a26d487d76d50a83440ba9358a5abbed381baa45c719b51234cc5cce7d3ad"} Jan 29 07:56:59 crc kubenswrapper[4861]: I0129 07:56:59.023828 4861 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"1079f778-4a50-46fe-a07a-5a7059b2865d","Type":"ContainerStarted","Data":"2e0feb7dc7121df0d01665eb97769f63c1ef00b1037b0a2392c1598c31b292f8"} Jan 29 07:56:59 crc kubenswrapper[4861]: I0129 07:56:59.083721 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.083695183 podStartE2EDuration="2.083695183s" podCreationTimestamp="2026-01-29 07:56:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 07:56:59.079427941 +0000 UTC m=+4910.750922518" watchObservedRunningTime="2026-01-29 07:56:59.083695183 +0000 UTC m=+4910.755189750" Jan 29 07:57:00 crc kubenswrapper[4861]: I0129 07:57:00.032304 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 29 07:57:02 crc kubenswrapper[4861]: I0129 07:57:02.056179 4861 generic.go:334] "Generic (PLEG): container finished" podID="3da8e0ee-9c1a-4557-a727-14de15187b68" containerID="c1911c7d958f05f4526a0518b9e5cd597624173a749629670de4150424346f9f" exitCode=0 Jan 29 07:57:02 crc kubenswrapper[4861]: I0129 07:57:02.056837 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3da8e0ee-9c1a-4557-a727-14de15187b68","Type":"ContainerDied","Data":"c1911c7d958f05f4526a0518b9e5cd597624173a749629670de4150424346f9f"} Jan 29 07:57:02 crc kubenswrapper[4861]: I0129 07:57:02.061469 4861 generic.go:334] "Generic (PLEG): container finished" podID="ca4fde45-47fb-44fa-baea-904bfec6b6e8" containerID="80d99cf917c54379aadfa8da25479a0f5b2e4f96a06bcf51949bc386b9d1251a" exitCode=0 Jan 29 07:57:02 crc kubenswrapper[4861]: I0129 07:57:02.061534 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ca4fde45-47fb-44fa-baea-904bfec6b6e8","Type":"ContainerDied","Data":"80d99cf917c54379aadfa8da25479a0f5b2e4f96a06bcf51949bc386b9d1251a"} Jan 29 07:57:03 crc kubenswrapper[4861]: I0129 07:57:03.070552 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ca4fde45-47fb-44fa-baea-904bfec6b6e8","Type":"ContainerStarted","Data":"e33fb15b7e48759091e4edcc94db87eedeab413723c859d71cad27addbfe306d"} Jan 29 07:57:03 crc kubenswrapper[4861]: I0129 07:57:03.072656 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3da8e0ee-9c1a-4557-a727-14de15187b68","Type":"ContainerStarted","Data":"8a84be2ca29184abade971f404cbb7b2f1f399c214a33db7f041323b0d6b5c12"} Jan 29 07:57:03 crc kubenswrapper[4861]: I0129 07:57:03.104598 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=9.104576774 podStartE2EDuration="9.104576774s" podCreationTimestamp="2026-01-29 07:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 07:57:03.097024216 +0000 UTC m=+4914.768518873" watchObservedRunningTime="2026-01-29 07:57:03.104576774 +0000 UTC m=+4914.776071351" Jan 29 07:57:03 crc kubenswrapper[4861]: I0129 07:57:03.140528 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=7.140507347 podStartE2EDuration="7.140507347s" podCreationTimestamp="2026-01-29 07:56:56 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 07:57:03.132362963 +0000 UTC m=+4914.803857540" watchObservedRunningTime="2026-01-29 07:57:03.140507347 +0000 UTC m=+4914.812001904" Jan 29 07:57:03 crc kubenswrapper[4861]: I0129 07:57:03.144655 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 29 07:57:03 crc kubenswrapper[4861]: I0129 07:57:03.744356 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-95587bc99-brqk5" Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.156254 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7" Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.204646 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-brqk5"] Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.204892 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-95587bc99-brqk5" podUID="d4fbe1c9-5cf9-4716-8367-e003d526fe53" containerName="dnsmasq-dns" containerID="cri-o://1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912" gracePeriod=10 Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.617153 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95587bc99-brqk5" Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.767265 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppsfj\" (UniqueName: \"kubernetes.io/projected/d4fbe1c9-5cf9-4716-8367-e003d526fe53-kube-api-access-ppsfj\") pod \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.767465 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-config\") pod \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.767540 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-dns-svc\") pod \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\" (UID: \"d4fbe1c9-5cf9-4716-8367-e003d526fe53\") " Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.773280 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4fbe1c9-5cf9-4716-8367-e003d526fe53-kube-api-access-ppsfj" (OuterVolumeSpecName: "kube-api-access-ppsfj") pod "d4fbe1c9-5cf9-4716-8367-e003d526fe53" (UID: "d4fbe1c9-5cf9-4716-8367-e003d526fe53"). InnerVolumeSpecName "kube-api-access-ppsfj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.798797 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-config" (OuterVolumeSpecName: "config") pod "d4fbe1c9-5cf9-4716-8367-e003d526fe53" (UID: "d4fbe1c9-5cf9-4716-8367-e003d526fe53"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.802826 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d4fbe1c9-5cf9-4716-8367-e003d526fe53" (UID: "d4fbe1c9-5cf9-4716-8367-e003d526fe53"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.869694 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppsfj\" (UniqueName: \"kubernetes.io/projected/d4fbe1c9-5cf9-4716-8367-e003d526fe53-kube-api-access-ppsfj\") on node \"crc\" DevicePath \"\"" Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.869726 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-config\") on node \"crc\" DevicePath \"\"" Jan 29 07:57:04 crc kubenswrapper[4861]: I0129 07:57:04.869758 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4fbe1c9-5cf9-4716-8367-e003d526fe53-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.093621 4861 generic.go:334] "Generic (PLEG): container finished" podID="d4fbe1c9-5cf9-4716-8367-e003d526fe53" containerID="1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912" exitCode=0 Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.093760 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95587bc99-brqk5" Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.094064 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-brqk5" event={"ID":"d4fbe1c9-5cf9-4716-8367-e003d526fe53","Type":"ContainerDied","Data":"1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912"} Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.094222 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-brqk5" event={"ID":"d4fbe1c9-5cf9-4716-8367-e003d526fe53","Type":"ContainerDied","Data":"1c547391ce0ed8b24408d7d41b2e77634f78a027cca5934d2588ea04e1b630d9"} Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.094352 4861 scope.go:117] "RemoveContainer" containerID="1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912" Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.118254 4861 scope.go:117] "RemoveContainer" containerID="eefbf6b5e0b61625bbd9a7cbf57695e9b39c526fa41d106d439599022f5c83ee" Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.138129 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-brqk5"] Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.141110 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-brqk5"] Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.155428 4861 scope.go:117] "RemoveContainer" containerID="1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912" Jan 29 07:57:05 crc kubenswrapper[4861]: E0129 07:57:05.155968 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912\": container with ID starting with 1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912 not 
found: ID does not exist" containerID="1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912"
Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.156016 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912"} err="failed to get container status \"1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912\": rpc error: code = NotFound desc = could not find container \"1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912\": container with ID starting with 1df1a65b6c647bc22359979fa9fc5f10921890ce4f4b9dc9aa100d97c204c912 not found: ID does not exist"
Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.156043 4861 scope.go:117] "RemoveContainer" containerID="eefbf6b5e0b61625bbd9a7cbf57695e9b39c526fa41d106d439599022f5c83ee"
Jan 29 07:57:05 crc kubenswrapper[4861]: E0129 07:57:05.156530 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eefbf6b5e0b61625bbd9a7cbf57695e9b39c526fa41d106d439599022f5c83ee\": container with ID starting with eefbf6b5e0b61625bbd9a7cbf57695e9b39c526fa41d106d439599022f5c83ee not found: ID does not exist" containerID="eefbf6b5e0b61625bbd9a7cbf57695e9b39c526fa41d106d439599022f5c83ee"
Jan 29 07:57:05 crc kubenswrapper[4861]: I0129 07:57:05.156575 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eefbf6b5e0b61625bbd9a7cbf57695e9b39c526fa41d106d439599022f5c83ee"} err="failed to get container status \"eefbf6b5e0b61625bbd9a7cbf57695e9b39c526fa41d106d439599022f5c83ee\": rpc error: code = NotFound desc = could not find container \"eefbf6b5e0b61625bbd9a7cbf57695e9b39c526fa41d106d439599022f5c83ee\": container with ID starting with eefbf6b5e0b61625bbd9a7cbf57695e9b39c526fa41d106d439599022f5c83ee not found: ID does not exist"
Jan 29 07:57:05 crc kubenswrapper[4861]: E0129 07:57:05.512982 4861 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.80:51804->38.102.83.80:46667: write tcp 38.102.83.80:51804->38.102.83.80:46667: write: broken pipe
Jan 29 07:57:06 crc kubenswrapper[4861]: I0129 07:57:06.541737 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Jan 29 07:57:06 crc kubenswrapper[4861]: I0129 07:57:06.542340 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Jan 29 07:57:07 crc kubenswrapper[4861]: I0129 07:57:07.013849 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Jan 29 07:57:07 crc kubenswrapper[4861]: I0129 07:57:07.132637 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4fbe1c9-5cf9-4716-8367-e003d526fe53" path="/var/lib/kubelet/pods/d4fbe1c9-5cf9-4716-8367-e003d526fe53/volumes"
Jan 29 07:57:07 crc kubenswrapper[4861]: I0129 07:57:07.218246 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Jan 29 07:57:07 crc kubenswrapper[4861]: I0129 07:57:07.830911 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Jan 29 07:57:07 crc kubenswrapper[4861]: I0129 07:57:07.830966 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Jan 29 07:57:08 crc kubenswrapper[4861]: I0129 07:57:08.950101 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 29 07:57:09 crc kubenswrapper[4861]: I0129 07:57:09.045796 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 29 07:57:14 crc kubenswrapper[4861]: I0129 07:57:14.878496 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-qhr4r"]
Jan 29 07:57:14 crc kubenswrapper[4861]: E0129 07:57:14.887410 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4fbe1c9-5cf9-4716-8367-e003d526fe53" containerName="init"
Jan 29 07:57:14 crc kubenswrapper[4861]: I0129 07:57:14.887469 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4fbe1c9-5cf9-4716-8367-e003d526fe53" containerName="init"
Jan 29 07:57:14 crc kubenswrapper[4861]: E0129 07:57:14.887586 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4fbe1c9-5cf9-4716-8367-e003d526fe53" containerName="dnsmasq-dns"
Jan 29 07:57:14 crc kubenswrapper[4861]: I0129 07:57:14.887601 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4fbe1c9-5cf9-4716-8367-e003d526fe53" containerName="dnsmasq-dns"
Jan 29 07:57:14 crc kubenswrapper[4861]: I0129 07:57:14.888504 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4fbe1c9-5cf9-4716-8367-e003d526fe53" containerName="dnsmasq-dns"
Jan 29 07:57:14 crc kubenswrapper[4861]: I0129 07:57:14.892170 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-qhr4r"
Jan 29 07:57:14 crc kubenswrapper[4861]: I0129 07:57:14.896358 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret"
Jan 29 07:57:14 crc kubenswrapper[4861]: I0129 07:57:14.912744 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-qhr4r"]
Jan 29 07:57:15 crc kubenswrapper[4861]: I0129 07:57:15.003527 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb4bf\" (UniqueName: \"kubernetes.io/projected/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-kube-api-access-jb4bf\") pod \"root-account-create-update-qhr4r\" (UID: \"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64\") " pod="openstack/root-account-create-update-qhr4r"
Jan 29 07:57:15 crc kubenswrapper[4861]: I0129 07:57:15.003752 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-operator-scripts\") pod \"root-account-create-update-qhr4r\" (UID: \"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64\") " pod="openstack/root-account-create-update-qhr4r"
Jan 29 07:57:15 crc kubenswrapper[4861]: I0129 07:57:15.106248 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb4bf\" (UniqueName: \"kubernetes.io/projected/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-kube-api-access-jb4bf\") pod \"root-account-create-update-qhr4r\" (UID: \"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64\") " pod="openstack/root-account-create-update-qhr4r"
Jan 29 07:57:15 crc kubenswrapper[4861]: I0129 07:57:15.106384 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-operator-scripts\") pod \"root-account-create-update-qhr4r\" (UID: \"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64\") " pod="openstack/root-account-create-update-qhr4r"
Jan 29 07:57:15 crc kubenswrapper[4861]: I0129 07:57:15.107826 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-operator-scripts\") pod \"root-account-create-update-qhr4r\" (UID: \"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64\") " pod="openstack/root-account-create-update-qhr4r"
Jan 29 07:57:15 crc kubenswrapper[4861]: I0129 07:57:15.140488 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb4bf\" (UniqueName: \"kubernetes.io/projected/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-kube-api-access-jb4bf\") pod \"root-account-create-update-qhr4r\" (UID: \"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64\") " pod="openstack/root-account-create-update-qhr4r"
Jan 29 07:57:15 crc kubenswrapper[4861]: I0129 07:57:15.226971 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-qhr4r"
Jan 29 07:57:15 crc kubenswrapper[4861]: I0129 07:57:15.705764 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-qhr4r"]
Jan 29 07:57:16 crc kubenswrapper[4861]: I0129 07:57:16.209298 4861 generic.go:334] "Generic (PLEG): container finished" podID="a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64" containerID="2befb88b6b29953445d205188809a3dc31da1a81f34f879665dc3c3ebde1246e" exitCode=0
Jan 29 07:57:16 crc kubenswrapper[4861]: I0129 07:57:16.209486 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-qhr4r" event={"ID":"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64","Type":"ContainerDied","Data":"2befb88b6b29953445d205188809a3dc31da1a81f34f879665dc3c3ebde1246e"}
Jan 29 07:57:16 crc kubenswrapper[4861]: I0129 07:57:16.209632 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-qhr4r" event={"ID":"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64","Type":"ContainerStarted","Data":"edf25dc21a3a7a9b63b03af558ac88be2629f7ef86ffd1cd82f94104b38b86de"}
Jan 29 07:57:17 crc kubenswrapper[4861]: I0129 07:57:17.647792 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-qhr4r"
Jan 29 07:57:17 crc kubenswrapper[4861]: I0129 07:57:17.845472 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jb4bf\" (UniqueName: \"kubernetes.io/projected/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-kube-api-access-jb4bf\") pod \"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64\" (UID: \"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64\") "
Jan 29 07:57:17 crc kubenswrapper[4861]: I0129 07:57:17.845527 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-operator-scripts\") pod \"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64\" (UID: \"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64\") "
Jan 29 07:57:17 crc kubenswrapper[4861]: I0129 07:57:17.846466 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64" (UID: "a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:57:17 crc kubenswrapper[4861]: I0129 07:57:17.851867 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-kube-api-access-jb4bf" (OuterVolumeSpecName: "kube-api-access-jb4bf") pod "a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64" (UID: "a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64"). InnerVolumeSpecName "kube-api-access-jb4bf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:57:17 crc kubenswrapper[4861]: I0129 07:57:17.948535 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jb4bf\" (UniqueName: \"kubernetes.io/projected/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-kube-api-access-jb4bf\") on node \"crc\" DevicePath \"\""
Jan 29 07:57:17 crc kubenswrapper[4861]: I0129 07:57:17.948577 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 07:57:18 crc kubenswrapper[4861]: I0129 07:57:18.230130 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-qhr4r" event={"ID":"a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64","Type":"ContainerDied","Data":"edf25dc21a3a7a9b63b03af558ac88be2629f7ef86ffd1cd82f94104b38b86de"}
Jan 29 07:57:18 crc kubenswrapper[4861]: I0129 07:57:18.230174 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="edf25dc21a3a7a9b63b03af558ac88be2629f7ef86ffd1cd82f94104b38b86de"
Jan 29 07:57:18 crc kubenswrapper[4861]: I0129 07:57:18.230224 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-qhr4r"
Jan 29 07:57:21 crc kubenswrapper[4861]: I0129 07:57:21.538371 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-qhr4r"]
Jan 29 07:57:21 crc kubenswrapper[4861]: I0129 07:57:21.543214 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-qhr4r"]
Jan 29 07:57:23 crc kubenswrapper[4861]: I0129 07:57:23.131771 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64" path="/var/lib/kubelet/pods/a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64/volumes"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.551466 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-94df6"]
Jan 29 07:57:26 crc kubenswrapper[4861]: E0129 07:57:26.553950 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64" containerName="mariadb-account-create-update"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.553978 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64" containerName="mariadb-account-create-update"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.554342 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a99b580b-fd6c-4cf3-86f2-e2dacfdf5f64" containerName="mariadb-account-create-update"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.555119 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-94df6"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.557117 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.618453 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-94df6"]
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.715827 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4862baf6-8fde-4e47-9667-0193440f9c36-operator-scripts\") pod \"root-account-create-update-94df6\" (UID: \"4862baf6-8fde-4e47-9667-0193440f9c36\") " pod="openstack/root-account-create-update-94df6"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.716056 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhgpr\" (UniqueName: \"kubernetes.io/projected/4862baf6-8fde-4e47-9667-0193440f9c36-kube-api-access-zhgpr\") pod \"root-account-create-update-94df6\" (UID: \"4862baf6-8fde-4e47-9667-0193440f9c36\") " pod="openstack/root-account-create-update-94df6"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.817421 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4862baf6-8fde-4e47-9667-0193440f9c36-operator-scripts\") pod \"root-account-create-update-94df6\" (UID: \"4862baf6-8fde-4e47-9667-0193440f9c36\") " pod="openstack/root-account-create-update-94df6"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.817483 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhgpr\" (UniqueName: \"kubernetes.io/projected/4862baf6-8fde-4e47-9667-0193440f9c36-kube-api-access-zhgpr\") pod \"root-account-create-update-94df6\" (UID: \"4862baf6-8fde-4e47-9667-0193440f9c36\") " pod="openstack/root-account-create-update-94df6"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.818762 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4862baf6-8fde-4e47-9667-0193440f9c36-operator-scripts\") pod \"root-account-create-update-94df6\" (UID: \"4862baf6-8fde-4e47-9667-0193440f9c36\") " pod="openstack/root-account-create-update-94df6"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.845273 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhgpr\" (UniqueName: \"kubernetes.io/projected/4862baf6-8fde-4e47-9667-0193440f9c36-kube-api-access-zhgpr\") pod \"root-account-create-update-94df6\" (UID: \"4862baf6-8fde-4e47-9667-0193440f9c36\") " pod="openstack/root-account-create-update-94df6"
Jan 29 07:57:26 crc kubenswrapper[4861]: I0129 07:57:26.929281 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-94df6"
Jan 29 07:57:27 crc kubenswrapper[4861]: I0129 07:57:27.400604 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-94df6"]
Jan 29 07:57:28 crc kubenswrapper[4861]: I0129 07:57:28.324598 4861 generic.go:334] "Generic (PLEG): container finished" podID="4862baf6-8fde-4e47-9667-0193440f9c36" containerID="7310c10ed96f4c8bababd44525f7781959353d494472d076dcc2bebae1c735d2" exitCode=0
Jan 29 07:57:28 crc kubenswrapper[4861]: I0129 07:57:28.324683 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-94df6" event={"ID":"4862baf6-8fde-4e47-9667-0193440f9c36","Type":"ContainerDied","Data":"7310c10ed96f4c8bababd44525f7781959353d494472d076dcc2bebae1c735d2"}
Jan 29 07:57:28 crc kubenswrapper[4861]: I0129 07:57:28.325811 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-94df6" event={"ID":"4862baf6-8fde-4e47-9667-0193440f9c36","Type":"ContainerStarted","Data":"d0b3e0b3545aa1f7d3de1d017abd6c242643bae4721912e8ea834d840379d2fa"}
Jan 29 07:57:29 crc kubenswrapper[4861]: I0129 07:57:29.782589 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-94df6"
Jan 29 07:57:29 crc kubenswrapper[4861]: I0129 07:57:29.871547 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4862baf6-8fde-4e47-9667-0193440f9c36-operator-scripts\") pod \"4862baf6-8fde-4e47-9667-0193440f9c36\" (UID: \"4862baf6-8fde-4e47-9667-0193440f9c36\") "
Jan 29 07:57:29 crc kubenswrapper[4861]: I0129 07:57:29.871611 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhgpr\" (UniqueName: \"kubernetes.io/projected/4862baf6-8fde-4e47-9667-0193440f9c36-kube-api-access-zhgpr\") pod \"4862baf6-8fde-4e47-9667-0193440f9c36\" (UID: \"4862baf6-8fde-4e47-9667-0193440f9c36\") "
Jan 29 07:57:29 crc kubenswrapper[4861]: I0129 07:57:29.872491 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4862baf6-8fde-4e47-9667-0193440f9c36-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4862baf6-8fde-4e47-9667-0193440f9c36" (UID: "4862baf6-8fde-4e47-9667-0193440f9c36"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:57:29 crc kubenswrapper[4861]: I0129 07:57:29.881308 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4862baf6-8fde-4e47-9667-0193440f9c36-kube-api-access-zhgpr" (OuterVolumeSpecName: "kube-api-access-zhgpr") pod "4862baf6-8fde-4e47-9667-0193440f9c36" (UID: "4862baf6-8fde-4e47-9667-0193440f9c36"). InnerVolumeSpecName "kube-api-access-zhgpr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:57:29 crc kubenswrapper[4861]: I0129 07:57:29.973523 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4862baf6-8fde-4e47-9667-0193440f9c36-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 07:57:29 crc kubenswrapper[4861]: I0129 07:57:29.973622 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhgpr\" (UniqueName: \"kubernetes.io/projected/4862baf6-8fde-4e47-9667-0193440f9c36-kube-api-access-zhgpr\") on node \"crc\" DevicePath \"\""
Jan 29 07:57:30 crc kubenswrapper[4861]: I0129 07:57:30.346417 4861 generic.go:334] "Generic (PLEG): container finished" podID="285529a3-bf50-4815-b38e-e95a0d291fb6" containerID="ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f" exitCode=0
Jan 29 07:57:30 crc kubenswrapper[4861]: I0129 07:57:30.346493 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"285529a3-bf50-4815-b38e-e95a0d291fb6","Type":"ContainerDied","Data":"ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f"}
Jan 29 07:57:30 crc kubenswrapper[4861]: I0129 07:57:30.349632 4861 generic.go:334] "Generic (PLEG): container finished" podID="4e61fc48-3390-4b35-956f-843772ead36e" containerID="968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c" exitCode=0
Jan 29 07:57:30 crc kubenswrapper[4861]: I0129 07:57:30.349728 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4e61fc48-3390-4b35-956f-843772ead36e","Type":"ContainerDied","Data":"968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c"}
Jan 29 07:57:30 crc kubenswrapper[4861]: I0129 07:57:30.354396 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-94df6"
Jan 29 07:57:30 crc kubenswrapper[4861]: I0129 07:57:30.354485 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-94df6" event={"ID":"4862baf6-8fde-4e47-9667-0193440f9c36","Type":"ContainerDied","Data":"d0b3e0b3545aa1f7d3de1d017abd6c242643bae4721912e8ea834d840379d2fa"}
Jan 29 07:57:30 crc kubenswrapper[4861]: I0129 07:57:30.355014 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0b3e0b3545aa1f7d3de1d017abd6c242643bae4721912e8ea834d840379d2fa"
Jan 29 07:57:30 crc kubenswrapper[4861]: I0129 07:57:30.629919 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:57:30 crc kubenswrapper[4861]: I0129 07:57:30.629994 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:57:31 crc kubenswrapper[4861]: I0129 07:57:31.364405 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"285529a3-bf50-4815-b38e-e95a0d291fb6","Type":"ContainerStarted","Data":"c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55"}
Jan 29 07:57:31 crc kubenswrapper[4861]: I0129 07:57:31.365139 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Jan 29 07:57:31 crc kubenswrapper[4861]: I0129 07:57:31.366876 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4e61fc48-3390-4b35-956f-843772ead36e","Type":"ContainerStarted","Data":"67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2"}
Jan 29 07:57:31 crc kubenswrapper[4861]: I0129 07:57:31.367171 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:57:31 crc kubenswrapper[4861]: I0129 07:57:31.408251 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.408225322 podStartE2EDuration="38.408225322s" podCreationTimestamp="2026-01-29 07:56:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 07:57:31.403503768 +0000 UTC m=+4943.074998395" watchObservedRunningTime="2026-01-29 07:57:31.408225322 +0000 UTC m=+4943.079719919"
Jan 29 07:57:31 crc kubenswrapper[4861]: I0129 07:57:31.444423 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.444394492 podStartE2EDuration="38.444394492s" podCreationTimestamp="2026-01-29 07:56:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 07:57:31.4420539 +0000 UTC m=+4943.113548487" watchObservedRunningTime="2026-01-29 07:57:31.444394492 +0000 UTC m=+4943.115889079"
Jan 29 07:57:44 crc kubenswrapper[4861]: I0129 07:57:44.937266 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Jan 29 07:57:45 crc kubenswrapper[4861]: I0129 07:57:45.459298 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Jan 29 07:57:53 crc kubenswrapper[4861]: I0129 07:57:53.858790 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-699964fbc-mdp2f"]
Jan 29 07:57:53 crc kubenswrapper[4861]: E0129 07:57:53.859650 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4862baf6-8fde-4e47-9667-0193440f9c36" containerName="mariadb-account-create-update"
Jan 29 07:57:53 crc kubenswrapper[4861]: I0129 07:57:53.859663 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4862baf6-8fde-4e47-9667-0193440f9c36" containerName="mariadb-account-create-update"
Jan 29 07:57:53 crc kubenswrapper[4861]: I0129 07:57:53.859807 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4862baf6-8fde-4e47-9667-0193440f9c36" containerName="mariadb-account-create-update"
Jan 29 07:57:53 crc kubenswrapper[4861]: I0129 07:57:53.860518 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:53 crc kubenswrapper[4861]: I0129 07:57:53.881689 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-mdp2f"]
Jan 29 07:57:53 crc kubenswrapper[4861]: I0129 07:57:53.982407 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-config\") pod \"dnsmasq-dns-699964fbc-mdp2f\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") " pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:53 crc kubenswrapper[4861]: I0129 07:57:53.982553 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6l6g\" (UniqueName: \"kubernetes.io/projected/b3372ba1-4e55-4072-b59b-ca130544ff26-kube-api-access-d6l6g\") pod \"dnsmasq-dns-699964fbc-mdp2f\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") " pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:53 crc kubenswrapper[4861]: I0129 07:57:53.982649 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-dns-svc\") pod \"dnsmasq-dns-699964fbc-mdp2f\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") " pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:54 crc kubenswrapper[4861]: I0129 07:57:54.084590 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-config\") pod \"dnsmasq-dns-699964fbc-mdp2f\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") " pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:54 crc kubenswrapper[4861]: I0129 07:57:54.084667 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6l6g\" (UniqueName: \"kubernetes.io/projected/b3372ba1-4e55-4072-b59b-ca130544ff26-kube-api-access-d6l6g\") pod \"dnsmasq-dns-699964fbc-mdp2f\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") " pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:54 crc kubenswrapper[4861]: I0129 07:57:54.084712 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-dns-svc\") pod \"dnsmasq-dns-699964fbc-mdp2f\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") " pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:54 crc kubenswrapper[4861]: I0129 07:57:54.085627 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-dns-svc\") pod \"dnsmasq-dns-699964fbc-mdp2f\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") " pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:54 crc kubenswrapper[4861]: I0129 07:57:54.085755 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-config\") pod \"dnsmasq-dns-699964fbc-mdp2f\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") " pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:54 crc kubenswrapper[4861]: I0129 07:57:54.111833 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6l6g\" (UniqueName: \"kubernetes.io/projected/b3372ba1-4e55-4072-b59b-ca130544ff26-kube-api-access-d6l6g\") pod \"dnsmasq-dns-699964fbc-mdp2f\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") " pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:54 crc kubenswrapper[4861]: I0129 07:57:54.182225 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:54 crc kubenswrapper[4861]: I0129 07:57:54.448692 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 07:57:54 crc kubenswrapper[4861]: I0129 07:57:54.664616 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-mdp2f"]
Jan 29 07:57:55 crc kubenswrapper[4861]: I0129 07:57:55.231335 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 29 07:57:55 crc kubenswrapper[4861]: I0129 07:57:55.575818 4861 generic.go:334] "Generic (PLEG): container finished" podID="b3372ba1-4e55-4072-b59b-ca130544ff26" containerID="04f2ed9ed01f3dd85ccd00d380f0cbefec4481c7d3cc925a54a230612f741993" exitCode=0
Jan 29 07:57:55 crc kubenswrapper[4861]: I0129 07:57:55.575858 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-mdp2f" event={"ID":"b3372ba1-4e55-4072-b59b-ca130544ff26","Type":"ContainerDied","Data":"04f2ed9ed01f3dd85ccd00d380f0cbefec4481c7d3cc925a54a230612f741993"}
Jan 29 07:57:55 crc kubenswrapper[4861]: I0129 07:57:55.575885 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-mdp2f" event={"ID":"b3372ba1-4e55-4072-b59b-ca130544ff26","Type":"ContainerStarted","Data":"1c0f7d7e7930935e36f67a0d26e19ed1d07d474187845ae1fe784876f3a91abc"}
Jan 29 07:57:56 crc kubenswrapper[4861]: I0129 07:57:56.599989 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-mdp2f" event={"ID":"b3372ba1-4e55-4072-b59b-ca130544ff26","Type":"ContainerStarted","Data":"8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9"}
Jan 29 07:57:56 crc kubenswrapper[4861]: I0129 07:57:56.600310 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:57:56 crc kubenswrapper[4861]: I0129 07:57:56.632692 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-699964fbc-mdp2f" podStartSLOduration=3.632674562 podStartE2EDuration="3.632674562s" podCreationTimestamp="2026-01-29 07:57:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 07:57:56.627980929 +0000 UTC m=+4968.299475496" watchObservedRunningTime="2026-01-29 07:57:56.632674562 +0000 UTC m=+4968.304169119"
Jan 29 07:57:58 crc kubenswrapper[4861]: I0129 07:57:58.694798 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="285529a3-bf50-4815-b38e-e95a0d291fb6" containerName="rabbitmq" containerID="cri-o://c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55" gracePeriod=604796
Jan 29 07:57:59 crc kubenswrapper[4861]: I0129 07:57:59.770490 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="4e61fc48-3390-4b35-956f-843772ead36e" containerName="rabbitmq" containerID="cri-o://67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2" gracePeriod=604796
Jan 29 07:58:00 crc kubenswrapper[4861]: I0129 07:58:00.629730 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 07:58:00 crc kubenswrapper[4861]: I0129 07:58:00.629821 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 07:58:04 crc kubenswrapper[4861]: I0129 07:58:04.185329 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 07:58:04 crc kubenswrapper[4861]: I0129 07:58:04.257395 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-4t9r7"]
Jan 29 07:58:04 crc kubenswrapper[4861]: I0129 07:58:04.257795 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7" podUID="79aab840-0e43-4ef0-8fba-793c022f49c4" containerName="dnsmasq-dns" containerID="cri-o://550adb00fd16ef1661c7027e2d9877ac2284c73204d6853f5676f9f138e634d8" gracePeriod=10
Jan 29 07:58:04 crc kubenswrapper[4861]: I0129 07:58:04.668706 4861 generic.go:334] "Generic (PLEG): container finished" podID="79aab840-0e43-4ef0-8fba-793c022f49c4" containerID="550adb00fd16ef1661c7027e2d9877ac2284c73204d6853f5676f9f138e634d8" exitCode=0
Jan 29 07:58:04 crc kubenswrapper[4861]: I0129 07:58:04.668758 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7" event={"ID":"79aab840-0e43-4ef0-8fba-793c022f49c4","Type":"ContainerDied","Data":"550adb00fd16ef1661c7027e2d9877ac2284c73204d6853f5676f9f138e634d8"}
Jan 29 07:58:04 crc kubenswrapper[4861]: I0129 07:58:04.830438 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:58:04 crc kubenswrapper[4861]: I0129 07:58:04.935616 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="285529a3-bf50-4815-b38e-e95a0d291fb6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.254:5671: connect: connection refused"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.013501 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-config\") pod \"79aab840-0e43-4ef0-8fba-793c022f49c4\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.013546 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-dns-svc\") pod \"79aab840-0e43-4ef0-8fba-793c022f49c4\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.013587 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5dkth\" (UniqueName: \"kubernetes.io/projected/79aab840-0e43-4ef0-8fba-793c022f49c4-kube-api-access-5dkth\") pod \"79aab840-0e43-4ef0-8fba-793c022f49c4\" (UID: \"79aab840-0e43-4ef0-8fba-793c022f49c4\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.021270 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79aab840-0e43-4ef0-8fba-793c022f49c4-kube-api-access-5dkth" (OuterVolumeSpecName: "kube-api-access-5dkth") pod "79aab840-0e43-4ef0-8fba-793c022f49c4" (UID: "79aab840-0e43-4ef0-8fba-793c022f49c4"). InnerVolumeSpecName "kube-api-access-5dkth". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.051900 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "79aab840-0e43-4ef0-8fba-793c022f49c4" (UID: "79aab840-0e43-4ef0-8fba-793c022f49c4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.062495 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-config" (OuterVolumeSpecName: "config") pod "79aab840-0e43-4ef0-8fba-793c022f49c4" (UID: "79aab840-0e43-4ef0-8fba-793c022f49c4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.115169 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-config\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.115199 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79aab840-0e43-4ef0-8fba-793c022f49c4-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.115209 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5dkth\" (UniqueName: \"kubernetes.io/projected/79aab840-0e43-4ef0-8fba-793c022f49c4-kube-api-access-5dkth\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.205282 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.318102 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-server-conf\") pod \"285529a3-bf50-4815-b38e-e95a0d291fb6\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.318184 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-plugins-conf\") pod \"285529a3-bf50-4815-b38e-e95a0d291fb6\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.318223 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/285529a3-bf50-4815-b38e-e95a0d291fb6-erlang-cookie-secret\") pod \"285529a3-bf50-4815-b38e-e95a0d291fb6\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.318248 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-config-data\") pod \"285529a3-bf50-4815-b38e-e95a0d291fb6\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.318291 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jgf7\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-kube-api-access-2jgf7\") pod \"285529a3-bf50-4815-b38e-e95a0d291fb6\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.318311 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-confd\") pod \"285529a3-bf50-4815-b38e-e95a0d291fb6\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.318348 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-erlang-cookie\") pod \"285529a3-bf50-4815-b38e-e95a0d291fb6\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.318403 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-plugins\") pod \"285529a3-bf50-4815-b38e-e95a0d291fb6\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.318432 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-tls\") pod \"285529a3-bf50-4815-b38e-e95a0d291fb6\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.318493 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/285529a3-bf50-4815-b38e-e95a0d291fb6-pod-info\") pod \"285529a3-bf50-4815-b38e-e95a0d291fb6\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.318647 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\") pod \"285529a3-bf50-4815-b38e-e95a0d291fb6\" (UID: \"285529a3-bf50-4815-b38e-e95a0d291fb6\") "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.319846 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "285529a3-bf50-4815-b38e-e95a0d291fb6" (UID: "285529a3-bf50-4815-b38e-e95a0d291fb6"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.320831 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "285529a3-bf50-4815-b38e-e95a0d291fb6" (UID: "285529a3-bf50-4815-b38e-e95a0d291fb6"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.320964 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "285529a3-bf50-4815-b38e-e95a0d291fb6" (UID: "285529a3-bf50-4815-b38e-e95a0d291fb6"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.322520 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-kube-api-access-2jgf7" (OuterVolumeSpecName: "kube-api-access-2jgf7") pod "285529a3-bf50-4815-b38e-e95a0d291fb6" (UID: "285529a3-bf50-4815-b38e-e95a0d291fb6"). InnerVolumeSpecName "kube-api-access-2jgf7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.326305 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/285529a3-bf50-4815-b38e-e95a0d291fb6-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "285529a3-bf50-4815-b38e-e95a0d291fb6" (UID: "285529a3-bf50-4815-b38e-e95a0d291fb6"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.329933 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "285529a3-bf50-4815-b38e-e95a0d291fb6" (UID: "285529a3-bf50-4815-b38e-e95a0d291fb6"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.333717 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/285529a3-bf50-4815-b38e-e95a0d291fb6-pod-info" (OuterVolumeSpecName: "pod-info") pod "285529a3-bf50-4815-b38e-e95a0d291fb6" (UID: "285529a3-bf50-4815-b38e-e95a0d291fb6"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.342133 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1" (OuterVolumeSpecName: "persistence") pod "285529a3-bf50-4815-b38e-e95a0d291fb6" (UID: "285529a3-bf50-4815-b38e-e95a0d291fb6"). InnerVolumeSpecName "pvc-265951ec-4820-48a0-858b-5bd327b4ffa1". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.343756 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-config-data" (OuterVolumeSpecName: "config-data") pod "285529a3-bf50-4815-b38e-e95a0d291fb6" (UID: "285529a3-bf50-4815-b38e-e95a0d291fb6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.362225 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-server-conf" (OuterVolumeSpecName: "server-conf") pod "285529a3-bf50-4815-b38e-e95a0d291fb6" (UID: "285529a3-bf50-4815-b38e-e95a0d291fb6"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.417139 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "285529a3-bf50-4815-b38e-e95a0d291fb6" (UID: "285529a3-bf50-4815-b38e-e95a0d291fb6"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.420002 4861 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.420027 4861 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/285529a3-bf50-4815-b38e-e95a0d291fb6-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.420039 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.420050 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jgf7\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-kube-api-access-2jgf7\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.420059 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.420080 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.420090 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.420098 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/285529a3-bf50-4815-b38e-e95a0d291fb6-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.420105 4861 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/285529a3-bf50-4815-b38e-e95a0d291fb6-pod-info\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.420131 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\") on node \"crc\" "
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.420140 4861 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/285529a3-bf50-4815-b38e-e95a0d291fb6-server-conf\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.437209 4861 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.437527 4861 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-265951ec-4820-48a0-858b-5bd327b4ffa1" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1") on node "crc"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.456037 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="4e61fc48-3390-4b35-956f-843772ead36e" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.255:5671: connect: connection refused"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.521548 4861 reconciler_common.go:293] "Volume detached for volume \"pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\") on node \"crc\" DevicePath \"\""
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.680506 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.681159 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-4t9r7" event={"ID":"79aab840-0e43-4ef0-8fba-793c022f49c4","Type":"ContainerDied","Data":"79f02e91a31e6dc67ad3baad4506483cebce6368b5ba092bf85108c72a67bdc1"}
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.681237 4861 scope.go:117] "RemoveContainer" containerID="550adb00fd16ef1661c7027e2d9877ac2284c73204d6853f5676f9f138e634d8"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.686199 4861 generic.go:334] "Generic (PLEG): container finished" podID="285529a3-bf50-4815-b38e-e95a0d291fb6" containerID="c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55" exitCode=0
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.686228 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"285529a3-bf50-4815-b38e-e95a0d291fb6","Type":"ContainerDied","Data":"c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55"}
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.686245 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"285529a3-bf50-4815-b38e-e95a0d291fb6","Type":"ContainerDied","Data":"9f853570ace0d21bfd4e6ad1d0dac66e2f387e30a3859e4b6b8316b33fc78444"}
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.686345 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.708534 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-4t9r7"]
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.714346 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-4t9r7"]
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.717975 4861 scope.go:117] "RemoveContainer" containerID="75f4904c6c16510c59b8908e665a078cdcb432cf08bee83aa03b3f0561d6dd8a"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.750466 4861 scope.go:117] "RemoveContainer" containerID="c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.754450 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.758316 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.801378 4861 scope.go:117] "RemoveContainer" containerID="ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.808439 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 07:58:05 crc kubenswrapper[4861]: E0129 07:58:05.808757 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79aab840-0e43-4ef0-8fba-793c022f49c4" containerName="dnsmasq-dns"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.808779 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="79aab840-0e43-4ef0-8fba-793c022f49c4" containerName="dnsmasq-dns"
Jan 29 07:58:05 crc kubenswrapper[4861]: E0129 07:58:05.808794 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="285529a3-bf50-4815-b38e-e95a0d291fb6" containerName="rabbitmq"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.808800 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="285529a3-bf50-4815-b38e-e95a0d291fb6" containerName="rabbitmq"
Jan 29 07:58:05 crc kubenswrapper[4861]: E0129 07:58:05.808814 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="285529a3-bf50-4815-b38e-e95a0d291fb6" containerName="setup-container"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.808822 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="285529a3-bf50-4815-b38e-e95a0d291fb6" containerName="setup-container"
Jan 29 07:58:05 crc kubenswrapper[4861]: E0129 07:58:05.808838 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79aab840-0e43-4ef0-8fba-793c022f49c4" containerName="init"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.808844 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="79aab840-0e43-4ef0-8fba-793c022f49c4" containerName="init"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.808974 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="79aab840-0e43-4ef0-8fba-793c022f49c4" containerName="dnsmasq-dns"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.808997 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="285529a3-bf50-4815-b38e-e95a0d291fb6" containerName="rabbitmq"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.809754 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.813051 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.813254 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.813480 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.813620 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.813722 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.814399 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mctnn"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.814592 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.832410 4861 scope.go:117] "RemoveContainer" containerID="c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55"
Jan 29 07:58:05 crc kubenswrapper[4861]: E0129 07:58:05.834270 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55\": container with ID starting with c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55 not found: ID does not exist" containerID="c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.834316 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55"} err="failed to get container status \"c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55\": rpc error: code = NotFound desc = could not find container \"c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55\": container with ID starting with c522f3c2fcff148c894e7bec43d38e180195bdc25613ff60264e84c4523f4e55 not found: ID does not exist"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.834344 4861 scope.go:117] "RemoveContainer" containerID="ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.834722 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 07:58:05 crc kubenswrapper[4861]: E0129 07:58:05.834738 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f\": container with ID starting with ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f not found: ID does not exist" containerID="ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.834811 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f"} err="failed to get container status \"ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f\": rpc error: code = NotFound desc = could not find container \"ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f\": container with ID starting with ce423997493a1b593bbcb9e9b09bbc83b8d3f09c036150a7fba43d86a7ae291f not found: ID does not exist"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.928009 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/36a174b6-c79e-4486-81d0-16a0ddb54e96-server-conf\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.928053 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.928098 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.928131 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.928150 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/36a174b6-c79e-4486-81d0-16a0ddb54e96-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.928165 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.928184 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/36a174b6-c79e-4486-81d0-16a0ddb54e96-pod-info\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.928199 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.928218 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qvqg\" (UniqueName: \"kubernetes.io/projected/36a174b6-c79e-4486-81d0-16a0ddb54e96-kube-api-access-4qvqg\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.928240 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/36a174b6-c79e-4486-81d0-16a0ddb54e96-config-data\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:05 crc kubenswrapper[4861]: I0129 07:58:05.928282 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/36a174b6-c79e-4486-81d0-16a0ddb54e96-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.029435 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.029484 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/36a174b6-c79e-4486-81d0-16a0ddb54e96-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.029508 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.029539 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/36a174b6-c79e-4486-81d0-16a0ddb54e96-pod-info\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.029565 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.029598 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qvqg\" (UniqueName: \"kubernetes.io/projected/36a174b6-c79e-4486-81d0-16a0ddb54e96-kube-api-access-4qvqg\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.029630 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/36a174b6-c79e-4486-81d0-16a0ddb54e96-config-data\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.029686 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/36a174b6-c79e-4486-81d0-16a0ddb54e96-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.029730 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/36a174b6-c79e-4486-81d0-16a0ddb54e96-server-conf\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.029760 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.029801 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.030640 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.030815 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.031354 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/36a174b6-c79e-4486-81d0-16a0ddb54e96-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.031520 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/36a174b6-c79e-4486-81d0-16a0ddb54e96-config-data\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.032336 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/36a174b6-c79e-4486-81d0-16a0ddb54e96-server-conf\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0"
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.033505 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice
STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.033549 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/84fb7c1aa0e6c235ee3373db0d2c7415ab4d1e3824fb691c7d47a18ee7d0cead/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.034892 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/36a174b6-c79e-4486-81d0-16a0ddb54e96-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.038362 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.041808 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/36a174b6-c79e-4486-81d0-16a0ddb54e96-pod-info\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.043134 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/36a174b6-c79e-4486-81d0-16a0ddb54e96-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.056271 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qvqg\" (UniqueName: \"kubernetes.io/projected/36a174b6-c79e-4486-81d0-16a0ddb54e96-kube-api-access-4qvqg\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.065805 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-265951ec-4820-48a0-858b-5bd327b4ffa1\") pod \"rabbitmq-server-0\" (UID: \"36a174b6-c79e-4486-81d0-16a0ddb54e96\") " pod="openstack/rabbitmq-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.163274 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.307789 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.438812 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-tls\") pod \"4e61fc48-3390-4b35-956f-843772ead36e\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.443625 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-erlang-cookie\") pod \"4e61fc48-3390-4b35-956f-843772ead36e\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.443705 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\") pod \"4e61fc48-3390-4b35-956f-843772ead36e\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.443728 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-confd\") pod \"4e61fc48-3390-4b35-956f-843772ead36e\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.443748 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-server-conf\") pod \"4e61fc48-3390-4b35-956f-843772ead36e\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.443804 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-plugins-conf\") pod \"4e61fc48-3390-4b35-956f-843772ead36e\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.443827 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-config-data\") pod \"4e61fc48-3390-4b35-956f-843772ead36e\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.443870 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4e61fc48-3390-4b35-956f-843772ead36e-erlang-cookie-secret\") pod \"4e61fc48-3390-4b35-956f-843772ead36e\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.443923 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jmp6\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-kube-api-access-7jmp6\") pod \"4e61fc48-3390-4b35-956f-843772ead36e\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.443949 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4e61fc48-3390-4b35-956f-843772ead36e-pod-info\") pod 
\"4e61fc48-3390-4b35-956f-843772ead36e\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.443972 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-plugins\") pod \"4e61fc48-3390-4b35-956f-843772ead36e\" (UID: \"4e61fc48-3390-4b35-956f-843772ead36e\") " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.445018 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "4e61fc48-3390-4b35-956f-843772ead36e" (UID: "4e61fc48-3390-4b35-956f-843772ead36e"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.445186 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "4e61fc48-3390-4b35-956f-843772ead36e" (UID: "4e61fc48-3390-4b35-956f-843772ead36e"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.446017 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.446046 4861 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.447720 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "4e61fc48-3390-4b35-956f-843772ead36e" (UID: "4e61fc48-3390-4b35-956f-843772ead36e"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.449338 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-kube-api-access-7jmp6" (OuterVolumeSpecName: "kube-api-access-7jmp6") pod "4e61fc48-3390-4b35-956f-843772ead36e" (UID: "4e61fc48-3390-4b35-956f-843772ead36e"). InnerVolumeSpecName "kube-api-access-7jmp6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.449382 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "4e61fc48-3390-4b35-956f-843772ead36e" (UID: "4e61fc48-3390-4b35-956f-843772ead36e"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.452238 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/4e61fc48-3390-4b35-956f-843772ead36e-pod-info" (OuterVolumeSpecName: "pod-info") pod "4e61fc48-3390-4b35-956f-843772ead36e" (UID: "4e61fc48-3390-4b35-956f-843772ead36e"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.458540 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324" (OuterVolumeSpecName: "persistence") pod "4e61fc48-3390-4b35-956f-843772ead36e" (UID: "4e61fc48-3390-4b35-956f-843772ead36e"). InnerVolumeSpecName "pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.465347 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e61fc48-3390-4b35-956f-843772ead36e-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "4e61fc48-3390-4b35-956f-843772ead36e" (UID: "4e61fc48-3390-4b35-956f-843772ead36e"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.479033 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-config-data" (OuterVolumeSpecName: "config-data") pod "4e61fc48-3390-4b35-956f-843772ead36e" (UID: "4e61fc48-3390-4b35-956f-843772ead36e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.482833 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-server-conf" (OuterVolumeSpecName: "server-conf") pod "4e61fc48-3390-4b35-956f-843772ead36e" (UID: "4e61fc48-3390-4b35-956f-843772ead36e"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.528465 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "4e61fc48-3390-4b35-956f-843772ead36e" (UID: "4e61fc48-3390-4b35-956f-843772ead36e"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.551285 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.551337 4861 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\") on node \"crc\" " Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.551349 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.551359 4861 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-server-conf\") on node \"crc\" DevicePath \"\"" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.551368 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e61fc48-3390-4b35-956f-843772ead36e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.551380 4861 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4e61fc48-3390-4b35-956f-843772ead36e-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.551391 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jmp6\" (UniqueName: \"kubernetes.io/projected/4e61fc48-3390-4b35-956f-843772ead36e-kube-api-access-7jmp6\") on node \"crc\" DevicePath \"\"" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.551400 4861 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4e61fc48-3390-4b35-956f-843772ead36e-pod-info\") on node \"crc\" DevicePath \"\"" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.551408 4861 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4e61fc48-3390-4b35-956f-843772ead36e-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.565218 4861 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.565519 4861 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324") on node "crc" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.652596 4861 reconciler_common.go:293] "Volume detached for volume \"pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\") on node \"crc\" DevicePath \"\"" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.695788 4861 generic.go:334] "Generic (PLEG): container finished" podID="4e61fc48-3390-4b35-956f-843772ead36e" containerID="67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2" exitCode=0 Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.695864 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4e61fc48-3390-4b35-956f-843772ead36e","Type":"ContainerDied","Data":"67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2"} Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.695899 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4e61fc48-3390-4b35-956f-843772ead36e","Type":"ContainerDied","Data":"773a810ec46a719bcaeb10d3633c6dbf023f2255e741a43926df8fa419cfaf9c"} Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.695927 4861 scope.go:117] "RemoveContainer" containerID="67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.696063 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.721880 4861 scope.go:117] "RemoveContainer" containerID="968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.738714 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.744339 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.764412 4861 scope.go:117] "RemoveContainer" containerID="67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2" Jan 29 07:58:06 crc kubenswrapper[4861]: E0129 07:58:06.770367 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2\": container with ID starting with 67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2 not found: ID does not exist" containerID="67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.770425 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2"} err="failed to get container status \"67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2\": rpc error: code = NotFound desc = could not find container \"67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2\": container with ID starting with 67009ea140a7131037948499793251be90934552ecc11b2a75572eda5ff497c2 not found: 
ID does not exist" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.770453 4861 scope.go:117] "RemoveContainer" containerID="968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c" Jan 29 07:58:06 crc kubenswrapper[4861]: E0129 07:58:06.774204 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c\": container with ID starting with 968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c not found: ID does not exist" containerID="968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.774250 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c"} err="failed to get container status \"968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c\": rpc error: code = NotFound desc = could not find container \"968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c\": container with ID starting with 968e81a37bdd019acd6a646efa2f1949e70f5de0963e2689b0239ab22ffd2d6c not found: ID does not exist" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.774848 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 07:58:06 crc kubenswrapper[4861]: E0129 07:58:06.776596 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e61fc48-3390-4b35-956f-843772ead36e" containerName="rabbitmq" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.776613 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e61fc48-3390-4b35-956f-843772ead36e" containerName="rabbitmq" Jan 29 07:58:06 crc kubenswrapper[4861]: E0129 07:58:06.776629 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e61fc48-3390-4b35-956f-843772ead36e" containerName="setup-container" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.776634 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e61fc48-3390-4b35-956f-843772ead36e" containerName="setup-container" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.776785 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e61fc48-3390-4b35-956f-843772ead36e" containerName="rabbitmq" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.777522 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.779637 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.780998 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.781253 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.781407 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.781628 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.781807 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-6zc8k" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.781842 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.783806 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.956517 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvc26\" (UniqueName: \"kubernetes.io/projected/511ae511-f29a-4c04-b655-b6168b8622db-kube-api-access-wvc26\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.956606 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/511ae511-f29a-4c04-b655-b6168b8622db-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.956673 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.956705 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/511ae511-f29a-4c04-b655-b6168b8622db-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.956754 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.956831 4861 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.956862 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/511ae511-f29a-4c04-b655-b6168b8622db-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.956914 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.957002 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/511ae511-f29a-4c04-b655-b6168b8622db-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.957221 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/511ae511-f29a-4c04-b655-b6168b8622db-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:06 crc kubenswrapper[4861]: I0129 07:58:06.957271 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.058491 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/511ae511-f29a-4c04-b655-b6168b8622db-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.058564 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.058589 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/511ae511-f29a-4c04-b655-b6168b8622db-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.058630 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.058669 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.058699 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/511ae511-f29a-4c04-b655-b6168b8622db-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.058737 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.058797 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/511ae511-f29a-4c04-b655-b6168b8622db-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.058846 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/511ae511-f29a-4c04-b655-b6168b8622db-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.058875 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.058975 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvc26\" (UniqueName: \"kubernetes.io/projected/511ae511-f29a-4c04-b655-b6168b8622db-kube-api-access-wvc26\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.060132 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.060317 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" 
(UniqueName: \"kubernetes.io/configmap/511ae511-f29a-4c04-b655-b6168b8622db-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.060442 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/511ae511-f29a-4c04-b655-b6168b8622db-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.060872 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/511ae511-f29a-4c04-b655-b6168b8622db-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.060987 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.062481 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.062517 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9d6c6648f58821c9c66f6b3e45e2571ce0b1381467c7b67dc15dc959d23e85e3/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.064659 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/511ae511-f29a-4c04-b655-b6168b8622db-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.064890 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/511ae511-f29a-4c04-b655-b6168b8622db-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.064965 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.066102 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/511ae511-f29a-4c04-b655-b6168b8622db-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " 
pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.077651 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvc26\" (UniqueName: \"kubernetes.io/projected/511ae511-f29a-4c04-b655-b6168b8622db-kube-api-access-wvc26\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.111815 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc35951b-2d45-4cc4-b09a-13ea4c1a1324\") pod \"rabbitmq-cell1-server-0\" (UID: \"511ae511-f29a-4c04-b655-b6168b8622db\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.128290 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="285529a3-bf50-4815-b38e-e95a0d291fb6" path="/var/lib/kubelet/pods/285529a3-bf50-4815-b38e-e95a0d291fb6/volumes" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.129344 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e61fc48-3390-4b35-956f-843772ead36e" path="/var/lib/kubelet/pods/4e61fc48-3390-4b35-956f-843772ead36e/volumes" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.130799 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79aab840-0e43-4ef0-8fba-793c022f49c4" path="/var/lib/kubelet/pods/79aab840-0e43-4ef0-8fba-793c022f49c4/volumes" Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.301463 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.400577 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:07 crc kubenswrapper[4861]: W0129 07:58:07.658597 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod511ae511_f29a_4c04_b655_b6168b8622db.slice/crio-af3d3d228e35379af9e1632fd13e21260fca12e2eba762cc49117133d1640c65 WatchSource:0}: Error finding container af3d3d228e35379af9e1632fd13e21260fca12e2eba762cc49117133d1640c65: Status 404 returned error can't find the container with id af3d3d228e35379af9e1632fd13e21260fca12e2eba762cc49117133d1640c65 Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.658779 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.712148 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"511ae511-f29a-4c04-b655-b6168b8622db","Type":"ContainerStarted","Data":"af3d3d228e35379af9e1632fd13e21260fca12e2eba762cc49117133d1640c65"} Jan 29 07:58:07 crc kubenswrapper[4861]: I0129 07:58:07.713035 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"36a174b6-c79e-4486-81d0-16a0ddb54e96","Type":"ContainerStarted","Data":"e0b72e6dcb3110882b51a598f3f19b95e41e182102bced68e7d26a01e88a4f45"} Jan 29 07:58:09 crc kubenswrapper[4861]: I0129 07:58:09.737755 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"36a174b6-c79e-4486-81d0-16a0ddb54e96","Type":"ContainerStarted","Data":"cb194de5be356d251d07cb76a259a9baa04ea839a3e8ca56a23a0eed0d00a7e9"} Jan 29 07:58:09 crc kubenswrapper[4861]: I0129 07:58:09.740636 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"511ae511-f29a-4c04-b655-b6168b8622db","Type":"ContainerStarted","Data":"ba5f48f065edeb43ffc940b2896cf4ab08e93cf81e74150392a6d8d624e4b125"} Jan 29 07:58:30 crc kubenswrapper[4861]: I0129 07:58:30.629903 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 07:58:30 crc kubenswrapper[4861]: I0129 07:58:30.630459 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 07:58:30 crc kubenswrapper[4861]: I0129 07:58:30.630500 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 07:58:30 crc kubenswrapper[4861]: I0129 07:58:30.631065 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 07:58:30 crc kubenswrapper[4861]: I0129 07:58:30.631137 4861 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" gracePeriod=600 Jan 29 07:58:30 crc kubenswrapper[4861]: E0129 07:58:30.763181 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:58:30 crc kubenswrapper[4861]: I0129 07:58:30.947924 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" exitCode=0 Jan 29 07:58:30 crc kubenswrapper[4861]: I0129 07:58:30.947987 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e"} Jan 29 07:58:30 crc kubenswrapper[4861]: I0129 07:58:30.948039 4861 scope.go:117] "RemoveContainer" containerID="9f6877e337631f1868c599d5c4b3a382e0ff7bf36ed5bf0ed6ba9714e754b1cf" Jan 29 07:58:30 crc kubenswrapper[4861]: I0129 07:58:30.949590 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 07:58:30 crc kubenswrapper[4861]: E0129 07:58:30.950221 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:58:42 crc kubenswrapper[4861]: I0129 07:58:42.046144 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"36a174b6-c79e-4486-81d0-16a0ddb54e96","Type":"ContainerDied","Data":"cb194de5be356d251d07cb76a259a9baa04ea839a3e8ca56a23a0eed0d00a7e9"} Jan 29 07:58:42 crc kubenswrapper[4861]: I0129 07:58:42.046152 4861 generic.go:334] "Generic (PLEG): container finished" podID="36a174b6-c79e-4486-81d0-16a0ddb54e96" containerID="cb194de5be356d251d07cb76a259a9baa04ea839a3e8ca56a23a0eed0d00a7e9" exitCode=0 Jan 29 07:58:42 crc kubenswrapper[4861]: I0129 07:58:42.049973 4861 generic.go:334] "Generic (PLEG): container finished" podID="511ae511-f29a-4c04-b655-b6168b8622db" containerID="ba5f48f065edeb43ffc940b2896cf4ab08e93cf81e74150392a6d8d624e4b125" exitCode=0 Jan 29 07:58:42 crc kubenswrapper[4861]: I0129 07:58:42.050013 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"511ae511-f29a-4c04-b655-b6168b8622db","Type":"ContainerDied","Data":"ba5f48f065edeb43ffc940b2896cf4ab08e93cf81e74150392a6d8d624e4b125"} Jan 29 07:58:42 crc kubenswrapper[4861]: I0129 07:58:42.118713 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 07:58:42 crc kubenswrapper[4861]: E0129 07:58:42.122780 
4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:58:43 crc kubenswrapper[4861]: I0129 07:58:43.060411 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"511ae511-f29a-4c04-b655-b6168b8622db","Type":"ContainerStarted","Data":"9385e2d6d5f1004c2c8d891bd3064652d3cdd496f16f6e2cb6adbf2fcc4d8e8b"} Jan 29 07:58:43 crc kubenswrapper[4861]: I0129 07:58:43.060921 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:43 crc kubenswrapper[4861]: I0129 07:58:43.062353 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"36a174b6-c79e-4486-81d0-16a0ddb54e96","Type":"ContainerStarted","Data":"be6f3f90645ea711d56fb42afb8380853ffc3782dc06c87a13264b467e32e8e9"} Jan 29 07:58:43 crc kubenswrapper[4861]: I0129 07:58:43.062630 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 29 07:58:43 crc kubenswrapper[4861]: I0129 07:58:43.099610 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.099586254 podStartE2EDuration="37.099586254s" podCreationTimestamp="2026-01-29 07:58:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 07:58:43.083709067 +0000 UTC m=+5014.755203624" watchObservedRunningTime="2026-01-29 07:58:43.099586254 +0000 UTC m=+5014.771080811" Jan 29 07:58:43 crc kubenswrapper[4861]: I0129 07:58:43.136581 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.136533093 podStartE2EDuration="38.136533093s" podCreationTimestamp="2026-01-29 07:58:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 07:58:43.131500711 +0000 UTC m=+5014.802995268" watchObservedRunningTime="2026-01-29 07:58:43.136533093 +0000 UTC m=+5014.808027650" Jan 29 07:58:56 crc kubenswrapper[4861]: I0129 07:58:56.117177 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 07:58:56 crc kubenswrapper[4861]: E0129 07:58:56.117927 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:58:56 crc kubenswrapper[4861]: I0129 07:58:56.167999 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 29 07:58:57 crc kubenswrapper[4861]: I0129 07:58:57.405495 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 29 07:58:59 crc 
kubenswrapper[4861]: I0129 07:58:59.759146 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Jan 29 07:58:59 crc kubenswrapper[4861]: I0129 07:58:59.761289 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 29 07:58:59 crc kubenswrapper[4861]: I0129 07:58:59.765645 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-gl2fq" Jan 29 07:58:59 crc kubenswrapper[4861]: I0129 07:58:59.773381 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 29 07:58:59 crc kubenswrapper[4861]: I0129 07:58:59.831886 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8gtc\" (UniqueName: \"kubernetes.io/projected/79cc0076-44e6-4ce7-ae82-b365980f6eb8-kube-api-access-f8gtc\") pod \"mariadb-client\" (UID: \"79cc0076-44e6-4ce7-ae82-b365980f6eb8\") " pod="openstack/mariadb-client" Jan 29 07:58:59 crc kubenswrapper[4861]: I0129 07:58:59.933214 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8gtc\" (UniqueName: \"kubernetes.io/projected/79cc0076-44e6-4ce7-ae82-b365980f6eb8-kube-api-access-f8gtc\") pod \"mariadb-client\" (UID: \"79cc0076-44e6-4ce7-ae82-b365980f6eb8\") " pod="openstack/mariadb-client" Jan 29 07:58:59 crc kubenswrapper[4861]: I0129 07:58:59.954705 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8gtc\" (UniqueName: \"kubernetes.io/projected/79cc0076-44e6-4ce7-ae82-b365980f6eb8-kube-api-access-f8gtc\") pod \"mariadb-client\" (UID: \"79cc0076-44e6-4ce7-ae82-b365980f6eb8\") " pod="openstack/mariadb-client" Jan 29 07:59:00 crc kubenswrapper[4861]: I0129 07:59:00.085228 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 29 07:59:00 crc kubenswrapper[4861]: I0129 07:59:00.442146 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 29 07:59:00 crc kubenswrapper[4861]: I0129 07:59:00.451175 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 07:59:01 crc kubenswrapper[4861]: I0129 07:59:01.219511 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"79cc0076-44e6-4ce7-ae82-b365980f6eb8","Type":"ContainerStarted","Data":"156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef"} Jan 29 07:59:01 crc kubenswrapper[4861]: I0129 07:59:01.219947 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"79cc0076-44e6-4ce7-ae82-b365980f6eb8","Type":"ContainerStarted","Data":"9c3e6dc5fac8c42d6e200ea78e3a5170a4520accaaa8e7147d420df841d319bd"} Jan 29 07:59:01 crc kubenswrapper[4861]: I0129 07:59:01.248136 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client" podStartSLOduration=1.7703541189999998 podStartE2EDuration="2.248110665s" podCreationTimestamp="2026-01-29 07:58:59 +0000 UTC" firstStartedPulling="2026-01-29 07:59:00.450742623 +0000 UTC m=+5032.122237220" lastFinishedPulling="2026-01-29 07:59:00.928499209 +0000 UTC m=+5032.599993766" observedRunningTime="2026-01-29 07:59:01.239469968 +0000 UTC m=+5032.910964575" watchObservedRunningTime="2026-01-29 07:59:01.248110665 +0000 UTC m=+5032.919605232" Jan 29 07:59:09 crc kubenswrapper[4861]: I0129 07:59:09.125005 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 07:59:09 crc kubenswrapper[4861]: E0129 07:59:09.126458 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:59:14 crc kubenswrapper[4861]: I0129 07:59:14.558970 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 29 07:59:14 crc kubenswrapper[4861]: I0129 07:59:14.559825 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mariadb-client" podUID="79cc0076-44e6-4ce7-ae82-b365980f6eb8" containerName="mariadb-client" containerID="cri-o://156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef" gracePeriod=30 Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.106181 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.227823 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8gtc\" (UniqueName: \"kubernetes.io/projected/79cc0076-44e6-4ce7-ae82-b365980f6eb8-kube-api-access-f8gtc\") pod \"79cc0076-44e6-4ce7-ae82-b365980f6eb8\" (UID: \"79cc0076-44e6-4ce7-ae82-b365980f6eb8\") " Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.241575 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79cc0076-44e6-4ce7-ae82-b365980f6eb8-kube-api-access-f8gtc" (OuterVolumeSpecName: "kube-api-access-f8gtc") pod "79cc0076-44e6-4ce7-ae82-b365980f6eb8" (UID: "79cc0076-44e6-4ce7-ae82-b365980f6eb8"). InnerVolumeSpecName "kube-api-access-f8gtc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.330066 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8gtc\" (UniqueName: \"kubernetes.io/projected/79cc0076-44e6-4ce7-ae82-b365980f6eb8-kube-api-access-f8gtc\") on node \"crc\" DevicePath \"\"" Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.380933 4861 generic.go:334] "Generic (PLEG): container finished" podID="79cc0076-44e6-4ce7-ae82-b365980f6eb8" containerID="156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef" exitCode=143 Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.380999 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.380997 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"79cc0076-44e6-4ce7-ae82-b365980f6eb8","Type":"ContainerDied","Data":"156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef"} Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.381216 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"79cc0076-44e6-4ce7-ae82-b365980f6eb8","Type":"ContainerDied","Data":"9c3e6dc5fac8c42d6e200ea78e3a5170a4520accaaa8e7147d420df841d319bd"} Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.381263 4861 scope.go:117] "RemoveContainer" containerID="156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef" Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.405968 4861 scope.go:117] "RemoveContainer" containerID="156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef" Jan 29 07:59:15 crc kubenswrapper[4861]: E0129 07:59:15.407517 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef\": container with ID starting with 156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef not found: ID does not exist" containerID="156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef" Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.407556 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef"} err="failed to get container status \"156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef\": rpc error: code = NotFound desc = could not find container \"156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef\": container with ID starting with 
156c8b60d31fc5df3168c28dc9cca9094ec493cae303cb15841a9f540d128eef not found: ID does not exist" Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.446113 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 29 07:59:15 crc kubenswrapper[4861]: I0129 07:59:15.455417 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 29 07:59:15 crc kubenswrapper[4861]: E0129 07:59:15.583172 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod79cc0076_44e6_4ce7_ae82_b365980f6eb8.slice/crio-9c3e6dc5fac8c42d6e200ea78e3a5170a4520accaaa8e7147d420df841d319bd\": RecentStats: unable to find data in memory cache]" Jan 29 07:59:17 crc kubenswrapper[4861]: I0129 07:59:17.132120 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79cc0076-44e6-4ce7-ae82-b365980f6eb8" path="/var/lib/kubelet/pods/79cc0076-44e6-4ce7-ae82-b365980f6eb8/volumes" Jan 29 07:59:20 crc kubenswrapper[4861]: I0129 07:59:20.116694 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 07:59:20 crc kubenswrapper[4861]: E0129 07:59:20.117377 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:59:31 crc kubenswrapper[4861]: I0129 07:59:31.116650 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 07:59:31 crc kubenswrapper[4861]: E0129 07:59:31.117473 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:59:45 crc kubenswrapper[4861]: I0129 07:59:45.117444 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 07:59:45 crc kubenswrapper[4861]: E0129 07:59:45.118340 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 07:59:57 crc kubenswrapper[4861]: I0129 07:59:57.116734 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 07:59:57 crc kubenswrapper[4861]: E0129 07:59:57.117929 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.164758 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh"] Jan 29 08:00:00 crc kubenswrapper[4861]: E0129 08:00:00.166473 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79cc0076-44e6-4ce7-ae82-b365980f6eb8" containerName="mariadb-client" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.166550 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="79cc0076-44e6-4ce7-ae82-b365980f6eb8" containerName="mariadb-client" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.166746 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="79cc0076-44e6-4ce7-ae82-b365980f6eb8" containerName="mariadb-client" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.167380 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.171348 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.171692 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.214303 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh"] Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.296520 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a9e7e75-ce79-4d87-9819-043b04ded202-config-volume\") pod \"collect-profiles-29494560-6h8fh\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.296609 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9a9e7e75-ce79-4d87-9819-043b04ded202-secret-volume\") pod \"collect-profiles-29494560-6h8fh\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.296807 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw4gt\" (UniqueName: \"kubernetes.io/projected/9a9e7e75-ce79-4d87-9819-043b04ded202-kube-api-access-gw4gt\") pod \"collect-profiles-29494560-6h8fh\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.399186 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw4gt\" (UniqueName: \"kubernetes.io/projected/9a9e7e75-ce79-4d87-9819-043b04ded202-kube-api-access-gw4gt\") pod \"collect-profiles-29494560-6h8fh\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.399300 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a9e7e75-ce79-4d87-9819-043b04ded202-config-volume\") pod \"collect-profiles-29494560-6h8fh\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.399366 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9a9e7e75-ce79-4d87-9819-043b04ded202-secret-volume\") pod \"collect-profiles-29494560-6h8fh\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.400551 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a9e7e75-ce79-4d87-9819-043b04ded202-config-volume\") pod \"collect-profiles-29494560-6h8fh\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.413365 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9a9e7e75-ce79-4d87-9819-043b04ded202-secret-volume\") pod \"collect-profiles-29494560-6h8fh\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.422505 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw4gt\" (UniqueName: \"kubernetes.io/projected/9a9e7e75-ce79-4d87-9819-043b04ded202-kube-api-access-gw4gt\") pod \"collect-profiles-29494560-6h8fh\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.508353 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.798652 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh"] Jan 29 08:00:00 crc kubenswrapper[4861]: I0129 08:00:00.824127 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" event={"ID":"9a9e7e75-ce79-4d87-9819-043b04ded202","Type":"ContainerStarted","Data":"a5aa7daf01cb09ba4daab0a8f18caa69f92958e98480216b69af8c2b43bc0679"} Jan 29 08:00:01 crc kubenswrapper[4861]: I0129 08:00:01.833456 4861 generic.go:334] "Generic (PLEG): container finished" podID="9a9e7e75-ce79-4d87-9819-043b04ded202" containerID="8f81ccaee78f79f9a9441d74c3a9700ea0caad086a65b57acbddaa3bb269b772" exitCode=0 Jan 29 08:00:01 crc kubenswrapper[4861]: I0129 08:00:01.833724 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" event={"ID":"9a9e7e75-ce79-4d87-9819-043b04ded202","Type":"ContainerDied","Data":"8f81ccaee78f79f9a9441d74c3a9700ea0caad086a65b57acbddaa3bb269b772"} Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.160994 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.250538 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a9e7e75-ce79-4d87-9819-043b04ded202-config-volume\") pod \"9a9e7e75-ce79-4d87-9819-043b04ded202\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.250657 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9a9e7e75-ce79-4d87-9819-043b04ded202-secret-volume\") pod \"9a9e7e75-ce79-4d87-9819-043b04ded202\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.250722 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw4gt\" (UniqueName: \"kubernetes.io/projected/9a9e7e75-ce79-4d87-9819-043b04ded202-kube-api-access-gw4gt\") pod \"9a9e7e75-ce79-4d87-9819-043b04ded202\" (UID: \"9a9e7e75-ce79-4d87-9819-043b04ded202\") " Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.251174 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a9e7e75-ce79-4d87-9819-043b04ded202-config-volume" (OuterVolumeSpecName: "config-volume") pod "9a9e7e75-ce79-4d87-9819-043b04ded202" (UID: "9a9e7e75-ce79-4d87-9819-043b04ded202"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.255973 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a9e7e75-ce79-4d87-9819-043b04ded202-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9a9e7e75-ce79-4d87-9819-043b04ded202" (UID: "9a9e7e75-ce79-4d87-9819-043b04ded202"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.256640 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a9e7e75-ce79-4d87-9819-043b04ded202-kube-api-access-gw4gt" (OuterVolumeSpecName: "kube-api-access-gw4gt") pod "9a9e7e75-ce79-4d87-9819-043b04ded202" (UID: "9a9e7e75-ce79-4d87-9819-043b04ded202"). InnerVolumeSpecName "kube-api-access-gw4gt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.352420 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9a9e7e75-ce79-4d87-9819-043b04ded202-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.352488 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw4gt\" (UniqueName: \"kubernetes.io/projected/9a9e7e75-ce79-4d87-9819-043b04ded202-kube-api-access-gw4gt\") on node \"crc\" DevicePath \"\"" Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.352526 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9a9e7e75-ce79-4d87-9819-043b04ded202-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.851866 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" event={"ID":"9a9e7e75-ce79-4d87-9819-043b04ded202","Type":"ContainerDied","Data":"a5aa7daf01cb09ba4daab0a8f18caa69f92958e98480216b69af8c2b43bc0679"} Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.851930 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh" Jan 29 08:00:03 crc kubenswrapper[4861]: I0129 08:00:03.851938 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5aa7daf01cb09ba4daab0a8f18caa69f92958e98480216b69af8c2b43bc0679" Jan 29 08:00:04 crc kubenswrapper[4861]: I0129 08:00:04.259414 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"] Jan 29 08:00:04 crc kubenswrapper[4861]: I0129 08:00:04.271013 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494515-wz747"] Jan 29 08:00:05 crc kubenswrapper[4861]: I0129 08:00:05.128167 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0caf9398-8b50-416e-ac1a-714fa4569e76" path="/var/lib/kubelet/pods/0caf9398-8b50-416e-ac1a-714fa4569e76/volumes" Jan 29 08:00:09 crc kubenswrapper[4861]: I0129 08:00:09.141107 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:00:09 crc kubenswrapper[4861]: E0129 08:00:09.141872 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:00:22 crc kubenswrapper[4861]: I0129 08:00:22.116497 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:00:22 crc kubenswrapper[4861]: E0129 08:00:22.117272 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:00:25 crc kubenswrapper[4861]: I0129 08:00:25.454311 4861 scope.go:117] "RemoveContainer" containerID="4c77653c8ccd0ec1923e5b5c13ac30c2439488d58eefee975e91f8e373121b87" Jan 29 08:00:37 crc kubenswrapper[4861]: I0129 08:00:37.117307 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:00:37 crc kubenswrapper[4861]: E0129 08:00:37.118379 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:00:50 crc kubenswrapper[4861]: I0129 08:00:50.117161 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:00:50 crc kubenswrapper[4861]: E0129 08:00:50.118147 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:01:03 crc kubenswrapper[4861]: I0129 08:01:03.117541 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:01:03 crc kubenswrapper[4861]: E0129 08:01:03.118571 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:01:18 crc kubenswrapper[4861]: I0129 08:01:18.116406 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:01:18 crc kubenswrapper[4861]: E0129 08:01:18.117445 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:01:25 crc kubenswrapper[4861]: I0129 08:01:25.555281 4861 scope.go:117] "RemoveContainer" containerID="b963d92e9fec10bd5afebccf5d776dff5bf2b6686c9655ba9022641fd6bc8824" Jan 29 08:01:31 crc kubenswrapper[4861]: I0129 08:01:31.116530 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:01:31 crc kubenswrapper[4861]: E0129 08:01:31.117560 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:01:42 crc kubenswrapper[4861]: I0129 08:01:42.117203 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:01:42 crc kubenswrapper[4861]: E0129 08:01:42.118264 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:01:53 crc kubenswrapper[4861]: I0129 08:01:53.116784 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:01:53 crc kubenswrapper[4861]: E0129 08:01:53.117732 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:02:08 crc kubenswrapper[4861]: I0129 08:02:08.116564 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:02:08 crc kubenswrapper[4861]: E0129 08:02:08.117587 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.657345 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vbkvn"] Jan 29 08:02:19 crc kubenswrapper[4861]: E0129 08:02:19.658550 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a9e7e75-ce79-4d87-9819-043b04ded202" containerName="collect-profiles" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.658569 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a9e7e75-ce79-4d87-9819-043b04ded202" containerName="collect-profiles" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.658780 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a9e7e75-ce79-4d87-9819-043b04ded202" containerName="collect-profiles" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.660286 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.674680 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vbkvn"] Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.794228 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-utilities\") pod \"redhat-marketplace-vbkvn\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.794281 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-catalog-content\") pod \"redhat-marketplace-vbkvn\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.794530 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7xxw\" (UniqueName: \"kubernetes.io/projected/9b786c99-5539-4886-9a9e-a17ace8a430e-kube-api-access-h7xxw\") pod \"redhat-marketplace-vbkvn\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.897464 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-utilities\") pod \"redhat-marketplace-vbkvn\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.897563 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-catalog-content\") pod \"redhat-marketplace-vbkvn\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.897654 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7xxw\" (UniqueName: \"kubernetes.io/projected/9b786c99-5539-4886-9a9e-a17ace8a430e-kube-api-access-h7xxw\") pod \"redhat-marketplace-vbkvn\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.898216 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-utilities\") pod \"redhat-marketplace-vbkvn\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.898588 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-catalog-content\") pod \"redhat-marketplace-vbkvn\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.926742 4861 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-h7xxw\" (UniqueName: \"kubernetes.io/projected/9b786c99-5539-4886-9a9e-a17ace8a430e-kube-api-access-h7xxw\") pod \"redhat-marketplace-vbkvn\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:19 crc kubenswrapper[4861]: I0129 08:02:19.991388 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:20 crc kubenswrapper[4861]: I0129 08:02:20.116909 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:02:20 crc kubenswrapper[4861]: E0129 08:02:20.117372 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:02:20 crc kubenswrapper[4861]: I0129 08:02:20.480905 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vbkvn"] Jan 29 08:02:21 crc kubenswrapper[4861]: I0129 08:02:21.222337 4861 generic.go:334] "Generic (PLEG): container finished" podID="9b786c99-5539-4886-9a9e-a17ace8a430e" containerID="9fd83d1dc95995355f545f8716e576feb9e821a5c804dbeb1f074c227ccf9335" exitCode=0 Jan 29 08:02:21 crc kubenswrapper[4861]: I0129 08:02:21.222442 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbkvn" event={"ID":"9b786c99-5539-4886-9a9e-a17ace8a430e","Type":"ContainerDied","Data":"9fd83d1dc95995355f545f8716e576feb9e821a5c804dbeb1f074c227ccf9335"} Jan 29 08:02:21 crc kubenswrapper[4861]: I0129 08:02:21.222650 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbkvn" event={"ID":"9b786c99-5539-4886-9a9e-a17ace8a430e","Type":"ContainerStarted","Data":"3656c76dcb2ea5712981a4d91d575368eefa227df6b6116e6ec2329188570b7d"} Jan 29 08:02:22 crc kubenswrapper[4861]: I0129 08:02:22.231507 4861 generic.go:334] "Generic (PLEG): container finished" podID="9b786c99-5539-4886-9a9e-a17ace8a430e" containerID="38dc50e2bcbea7d13a9cd7698146d6acaba2a46b960009cfeef7b309fe579222" exitCode=0 Jan 29 08:02:22 crc kubenswrapper[4861]: I0129 08:02:22.231566 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbkvn" event={"ID":"9b786c99-5539-4886-9a9e-a17ace8a430e","Type":"ContainerDied","Data":"38dc50e2bcbea7d13a9cd7698146d6acaba2a46b960009cfeef7b309fe579222"} Jan 29 08:02:23 crc kubenswrapper[4861]: I0129 08:02:23.241433 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbkvn" event={"ID":"9b786c99-5539-4886-9a9e-a17ace8a430e","Type":"ContainerStarted","Data":"09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e"} Jan 29 08:02:23 crc kubenswrapper[4861]: I0129 08:02:23.272148 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vbkvn" podStartSLOduration=2.825589827 podStartE2EDuration="4.272127181s" podCreationTimestamp="2026-01-29 08:02:19 +0000 UTC" firstStartedPulling="2026-01-29 08:02:21.224028605 +0000 UTC m=+5232.895523182" lastFinishedPulling="2026-01-29 
08:02:22.670565929 +0000 UTC m=+5234.342060536" observedRunningTime="2026-01-29 08:02:23.262303313 +0000 UTC m=+5234.933797910" watchObservedRunningTime="2026-01-29 08:02:23.272127181 +0000 UTC m=+5234.943621738" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.047428 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-x9vdp"] Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.050612 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.061965 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x9vdp"] Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.113201 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-catalog-content\") pod \"certified-operators-x9vdp\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.113574 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-utilities\") pod \"certified-operators-x9vdp\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.113790 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7lq5\" (UniqueName: \"kubernetes.io/projected/e65b836e-ea8e-44e6-aca9-eecc977f87af-kube-api-access-q7lq5\") pod \"certified-operators-x9vdp\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.215542 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7lq5\" (UniqueName: \"kubernetes.io/projected/e65b836e-ea8e-44e6-aca9-eecc977f87af-kube-api-access-q7lq5\") pod \"certified-operators-x9vdp\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.215613 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-catalog-content\") pod \"certified-operators-x9vdp\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.215743 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-utilities\") pod \"certified-operators-x9vdp\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.216362 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-utilities\") pod \"certified-operators-x9vdp\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " 
pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.216631 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-catalog-content\") pod \"certified-operators-x9vdp\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.249268 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7lq5\" (UniqueName: \"kubernetes.io/projected/e65b836e-ea8e-44e6-aca9-eecc977f87af-kube-api-access-q7lq5\") pod \"certified-operators-x9vdp\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.394462 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:26 crc kubenswrapper[4861]: I0129 08:02:26.853205 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x9vdp"] Jan 29 08:02:27 crc kubenswrapper[4861]: I0129 08:02:27.274153 4861 generic.go:334] "Generic (PLEG): container finished" podID="e65b836e-ea8e-44e6-aca9-eecc977f87af" containerID="6b06731e9e0e54f17647bcd3ea10cbcd6f4cf82ef7ecec693823e1ebda72f62f" exitCode=0 Jan 29 08:02:27 crc kubenswrapper[4861]: I0129 08:02:27.274200 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9vdp" event={"ID":"e65b836e-ea8e-44e6-aca9-eecc977f87af","Type":"ContainerDied","Data":"6b06731e9e0e54f17647bcd3ea10cbcd6f4cf82ef7ecec693823e1ebda72f62f"} Jan 29 08:02:27 crc kubenswrapper[4861]: I0129 08:02:27.274228 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9vdp" event={"ID":"e65b836e-ea8e-44e6-aca9-eecc977f87af","Type":"ContainerStarted","Data":"c99db22006e1aa32db4ddca4bc7404183420163392117ed228f825ff13509e0f"} Jan 29 08:02:28 crc kubenswrapper[4861]: I0129 08:02:28.294236 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9vdp" event={"ID":"e65b836e-ea8e-44e6-aca9-eecc977f87af","Type":"ContainerStarted","Data":"5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad"} Jan 29 08:02:29 crc kubenswrapper[4861]: I0129 08:02:29.304643 4861 generic.go:334] "Generic (PLEG): container finished" podID="e65b836e-ea8e-44e6-aca9-eecc977f87af" containerID="5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad" exitCode=0 Jan 29 08:02:29 crc kubenswrapper[4861]: I0129 08:02:29.304714 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9vdp" event={"ID":"e65b836e-ea8e-44e6-aca9-eecc977f87af","Type":"ContainerDied","Data":"5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad"} Jan 29 08:02:29 crc kubenswrapper[4861]: I0129 08:02:29.991796 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:29 crc kubenswrapper[4861]: I0129 08:02:29.992215 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:30 crc kubenswrapper[4861]: I0129 08:02:30.046618 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:30 crc kubenswrapper[4861]: I0129 08:02:30.318726 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9vdp" event={"ID":"e65b836e-ea8e-44e6-aca9-eecc977f87af","Type":"ContainerStarted","Data":"83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358"} Jan 29 08:02:30 crc kubenswrapper[4861]: I0129 08:02:30.350376 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-x9vdp" podStartSLOduration=1.8782449730000002 podStartE2EDuration="4.350333236s" podCreationTimestamp="2026-01-29 08:02:26 +0000 UTC" firstStartedPulling="2026-01-29 08:02:27.276405883 +0000 UTC m=+5238.947900440" lastFinishedPulling="2026-01-29 08:02:29.748494146 +0000 UTC m=+5241.419988703" observedRunningTime="2026-01-29 08:02:30.346329601 +0000 UTC m=+5242.017824198" watchObservedRunningTime="2026-01-29 08:02:30.350333236 +0000 UTC m=+5242.021827833" Jan 29 08:02:30 crc kubenswrapper[4861]: I0129 08:02:30.379429 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.426563 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vbkvn"] Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.426945 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vbkvn" podUID="9b786c99-5539-4886-9a9e-a17ace8a430e" containerName="registry-server" containerID="cri-o://09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e" gracePeriod=2 Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.644016 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"] Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.646570 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data"
Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.658620 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-gl2fq"
Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.688290 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"]
Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.731334 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnbtt\" (UniqueName: \"kubernetes.io/projected/7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce-kube-api-access-xnbtt\") pod \"mariadb-copy-data\" (UID: \"7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce\") " pod="openstack/mariadb-copy-data"
Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.731439 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f9fb0473-c523-433c-a53c-5635dda890fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9fb0473-c523-433c-a53c-5635dda890fd\") pod \"mariadb-copy-data\" (UID: \"7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce\") " pod="openstack/mariadb-copy-data"
Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.833611 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnbtt\" (UniqueName: \"kubernetes.io/projected/7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce-kube-api-access-xnbtt\") pod \"mariadb-copy-data\" (UID: \"7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce\") " pod="openstack/mariadb-copy-data"
Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.833674 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f9fb0473-c523-433c-a53c-5635dda890fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9fb0473-c523-433c-a53c-5635dda890fd\") pod \"mariadb-copy-data\" (UID: \"7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce\") " pod="openstack/mariadb-copy-data"
Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.837661 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
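
The csi_attacher entry above is worth a note: the kubevirt.io.hostpath-provisioner node plugin does not advertise the STAGE_UNSTAGE_VOLUME capability, so the kubelet treats MountDevice (the CSI NodeStageVolume step) as a no-op and proceeds directly to MountVolume.SetUp (NodePublishVolume). That is why the next entries record "MountVolume.MountDevice succeeded" with a global device mount path and then "MountVolume.SetUp succeeded" for the same PVC. Below is a minimal sketch of querying that same capability directly from a CSI node plugin, assuming the standard CSI Go bindings (github.com/container-storage-interface/spec) and a hypothetical socket path; neither the path nor this tool appears in the log itself.

    package main

    import (
        "context"
        "fmt"
        "log"
        "time"

        csi "github.com/container-storage-interface/spec/lib/go/csi"
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        // Hypothetical endpoint; the real socket path depends on how the
        // driver is deployed under /var/lib/kubelet/plugins/ on the node.
        const endpoint = "unix:///var/lib/kubelet/plugins/csi-hostpath/csi.sock"

        conn, err := grpc.Dial(endpoint, grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            log.Fatalf("dial CSI node plugin: %v", err)
        }
        defer conn.Close()

        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        resp, err := csi.NewNodeClient(conn).NodeGetCapabilities(ctx, &csi.NodeGetCapabilitiesRequest{})
        if err != nil {
            log.Fatalf("NodeGetCapabilities: %v", err)
        }

        // Generated protobuf getters are nil-safe, so this loop tolerates
        // capability entries that are not of the RPC kind.
        stages := false
        for _, c := range resp.GetCapabilities() {
            if c.GetRpc().GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
                stages = true
            }
        }
        if stages {
            fmt.Println("driver stages volumes: kubelet calls NodeStageVolume before NodePublishVolume")
        } else {
            fmt.Println("STAGE_UNSTAGE_VOLUME not set: kubelet skips MountDevice, as in the log above")
        }
    }

Drivers that publish volumes by bind-mounting a host path usually have no node-global staging step to perform, so omitting the capability is the expected shape for this provisioner rather than an error.
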
Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.837732 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f9fb0473-c523-433c-a53c-5635dda890fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9fb0473-c523-433c-a53c-5635dda890fd\") pod \"mariadb-copy-data\" (UID: \"7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/41cb0e6df4309987ddaa341f1c9e05f83ef4cbd0763b0ae2e48fd7ae46811803/globalmount\"" pod="openstack/mariadb-copy-data" Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.863203 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnbtt\" (UniqueName: \"kubernetes.io/projected/7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce-kube-api-access-xnbtt\") pod \"mariadb-copy-data\" (UID: \"7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce\") " pod="openstack/mariadb-copy-data" Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.876982 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f9fb0473-c523-433c-a53c-5635dda890fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9fb0473-c523-433c-a53c-5635dda890fd\") pod \"mariadb-copy-data\" (UID: \"7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce\") " pod="openstack/mariadb-copy-data" Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.909940 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.935781 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-utilities\") pod \"9b786c99-5539-4886-9a9e-a17ace8a430e\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.935834 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7xxw\" (UniqueName: \"kubernetes.io/projected/9b786c99-5539-4886-9a9e-a17ace8a430e-kube-api-access-h7xxw\") pod \"9b786c99-5539-4886-9a9e-a17ace8a430e\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.936249 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-catalog-content\") pod \"9b786c99-5539-4886-9a9e-a17ace8a430e\" (UID: \"9b786c99-5539-4886-9a9e-a17ace8a430e\") " Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.937473 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-utilities" (OuterVolumeSpecName: "utilities") pod "9b786c99-5539-4886-9a9e-a17ace8a430e" (UID: "9b786c99-5539-4886-9a9e-a17ace8a430e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.945349 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b786c99-5539-4886-9a9e-a17ace8a430e-kube-api-access-h7xxw" (OuterVolumeSpecName: "kube-api-access-h7xxw") pod "9b786c99-5539-4886-9a9e-a17ace8a430e" (UID: "9b786c99-5539-4886-9a9e-a17ace8a430e"). InnerVolumeSpecName "kube-api-access-h7xxw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.963983 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9b786c99-5539-4886-9a9e-a17ace8a430e" (UID: "9b786c99-5539-4886-9a9e-a17ace8a430e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:02:32 crc kubenswrapper[4861]: I0129 08:02:32.979342 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.038211 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.038250 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b786c99-5539-4886-9a9e-a17ace8a430e-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.038263 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7xxw\" (UniqueName: \"kubernetes.io/projected/9b786c99-5539-4886-9a9e-a17ace8a430e-kube-api-access-h7xxw\") on node \"crc\" DevicePath \"\"" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.350621 4861 generic.go:334] "Generic (PLEG): container finished" podID="9b786c99-5539-4886-9a9e-a17ace8a430e" containerID="09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e" exitCode=0 Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.350697 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbkvn" event={"ID":"9b786c99-5539-4886-9a9e-a17ace8a430e","Type":"ContainerDied","Data":"09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e"} Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.350729 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vbkvn" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.351057 4861 scope.go:117] "RemoveContainer" containerID="09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.351036 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vbkvn" event={"ID":"9b786c99-5539-4886-9a9e-a17ace8a430e","Type":"ContainerDied","Data":"3656c76dcb2ea5712981a4d91d575368eefa227df6b6116e6ec2329188570b7d"} Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.388175 4861 scope.go:117] "RemoveContainer" containerID="38dc50e2bcbea7d13a9cd7698146d6acaba2a46b960009cfeef7b309fe579222" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.395013 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vbkvn"] Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.403057 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vbkvn"] Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.418109 4861 scope.go:117] "RemoveContainer" containerID="9fd83d1dc95995355f545f8716e576feb9e821a5c804dbeb1f074c227ccf9335" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.444169 4861 scope.go:117] "RemoveContainer" containerID="09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e" Jan 29 08:02:33 crc kubenswrapper[4861]: E0129 08:02:33.445669 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e\": container with ID starting with 09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e not found: ID does not exist" containerID="09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.445738 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e"} err="failed to get container status \"09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e\": rpc error: code = NotFound desc = could not find container \"09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e\": container with ID starting with 09fb82b5ee8e7e4e1b134c347e1c8579908e45ba327dfef759b495435baa967e not found: ID does not exist" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.445782 4861 scope.go:117] "RemoveContainer" containerID="38dc50e2bcbea7d13a9cd7698146d6acaba2a46b960009cfeef7b309fe579222" Jan 29 08:02:33 crc kubenswrapper[4861]: E0129 08:02:33.446462 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38dc50e2bcbea7d13a9cd7698146d6acaba2a46b960009cfeef7b309fe579222\": container with ID starting with 38dc50e2bcbea7d13a9cd7698146d6acaba2a46b960009cfeef7b309fe579222 not found: ID does not exist" containerID="38dc50e2bcbea7d13a9cd7698146d6acaba2a46b960009cfeef7b309fe579222" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.446554 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38dc50e2bcbea7d13a9cd7698146d6acaba2a46b960009cfeef7b309fe579222"} err="failed to get container status \"38dc50e2bcbea7d13a9cd7698146d6acaba2a46b960009cfeef7b309fe579222\": rpc error: code = NotFound desc = could not find 
container \"38dc50e2bcbea7d13a9cd7698146d6acaba2a46b960009cfeef7b309fe579222\": container with ID starting with 38dc50e2bcbea7d13a9cd7698146d6acaba2a46b960009cfeef7b309fe579222 not found: ID does not exist" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.446633 4861 scope.go:117] "RemoveContainer" containerID="9fd83d1dc95995355f545f8716e576feb9e821a5c804dbeb1f074c227ccf9335" Jan 29 08:02:33 crc kubenswrapper[4861]: E0129 08:02:33.447272 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fd83d1dc95995355f545f8716e576feb9e821a5c804dbeb1f074c227ccf9335\": container with ID starting with 9fd83d1dc95995355f545f8716e576feb9e821a5c804dbeb1f074c227ccf9335 not found: ID does not exist" containerID="9fd83d1dc95995355f545f8716e576feb9e821a5c804dbeb1f074c227ccf9335" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.447368 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fd83d1dc95995355f545f8716e576feb9e821a5c804dbeb1f074c227ccf9335"} err="failed to get container status \"9fd83d1dc95995355f545f8716e576feb9e821a5c804dbeb1f074c227ccf9335\": rpc error: code = NotFound desc = could not find container \"9fd83d1dc95995355f545f8716e576feb9e821a5c804dbeb1f074c227ccf9335\": container with ID starting with 9fd83d1dc95995355f545f8716e576feb9e821a5c804dbeb1f074c227ccf9335 not found: ID does not exist" Jan 29 08:02:33 crc kubenswrapper[4861]: I0129 08:02:33.571165 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Jan 29 08:02:33 crc kubenswrapper[4861]: W0129 08:02:33.579262 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c2eaff6_8fb9_4112_bdbd_e9dedc4233ce.slice/crio-50fa1edce9ace38eb7c8d3f4c23173fc9fec0bee229dcbaef673ae323277e101 WatchSource:0}: Error finding container 50fa1edce9ace38eb7c8d3f4c23173fc9fec0bee229dcbaef673ae323277e101: Status 404 returned error can't find the container with id 50fa1edce9ace38eb7c8d3f4c23173fc9fec0bee229dcbaef673ae323277e101 Jan 29 08:02:34 crc kubenswrapper[4861]: I0129 08:02:34.363754 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce","Type":"ContainerStarted","Data":"d4d765f1d26ca0d58fb59e5fc5ddf0f3b2ee044ccedee98a6c5a2be83862eb86"} Jan 29 08:02:34 crc kubenswrapper[4861]: I0129 08:02:34.364024 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce","Type":"ContainerStarted","Data":"50fa1edce9ace38eb7c8d3f4c23173fc9fec0bee229dcbaef673ae323277e101"} Jan 29 08:02:34 crc kubenswrapper[4861]: I0129 08:02:34.390015 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=3.389995659 podStartE2EDuration="3.389995659s" podCreationTimestamp="2026-01-29 08:02:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:02:34.389674401 +0000 UTC m=+5246.061169018" watchObservedRunningTime="2026-01-29 08:02:34.389995659 +0000 UTC m=+5246.061490216" Jan 29 08:02:35 crc kubenswrapper[4861]: I0129 08:02:35.116681 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:02:35 crc kubenswrapper[4861]: E0129 08:02:35.116987 4861 
Jan 29 08:02:35 crc kubenswrapper[4861]: I0129 08:02:35.127159 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b786c99-5539-4886-9a9e-a17ace8a430e" path="/var/lib/kubelet/pods/9b786c99-5539-4886-9a9e-a17ace8a430e/volumes"
Jan 29 08:02:36 crc kubenswrapper[4861]: I0129 08:02:36.396614 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x9vdp"
Jan 29 08:02:36 crc kubenswrapper[4861]: I0129 08:02:36.397342 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x9vdp"
Jan 29 08:02:36 crc kubenswrapper[4861]: I0129 08:02:36.482693 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x9vdp"
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.077677 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"]
Jan 29 08:02:37 crc kubenswrapper[4861]: E0129 08:02:37.078439 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b786c99-5539-4886-9a9e-a17ace8a430e" containerName="extract-content"
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.078485 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b786c99-5539-4886-9a9e-a17ace8a430e" containerName="extract-content"
Jan 29 08:02:37 crc kubenswrapper[4861]: E0129 08:02:37.078590 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b786c99-5539-4886-9a9e-a17ace8a430e" containerName="extract-utilities"
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.078629 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b786c99-5539-4886-9a9e-a17ace8a430e" containerName="extract-utilities"
Jan 29 08:02:37 crc kubenswrapper[4861]: E0129 08:02:37.078656 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b786c99-5539-4886-9a9e-a17ace8a430e" containerName="registry-server"
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.078674 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b786c99-5539-4886-9a9e-a17ace8a430e" containerName="registry-server"
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.079189 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b786c99-5539-4886-9a9e-a17ace8a430e" containerName="registry-server"
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.080509 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
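
Note: the cpu_manager/state_mem/memory_manager burst above is the kubelet pruning checkpointed CPU and memory assignments for containers of pod 9b786c99-5539-4886-9a9e-a17ace8a430e, which no longer exists, before admitting the new mariadb-client pod. A rough sketch of that reconcile-and-prune shape, with hypothetical types (not the kubelet's own):

    package main

    import "fmt"

    // key mirrors the log fields: podUID plus containerName.
    type key struct{ podUID, container string }

    // removeStaleState drops checkpointed assignments whose pod is no longer
    // active -- the analogue of the "Deleted CPUSet assignment" lines above.
    func removeStaleState(state map[key]string, active map[string]bool) []key {
        var pruned []key
        for k := range state {
            if !active[k.podUID] {
                pruned = append(pruned, k)
                delete(state, k)
            }
        }
        return pruned
    }

    func main() {
        state := map[key]string{
            {"9b786c99", "extract-content"}: "cpus 0-1",
            {"9b786c99", "registry-server"}: "cpus 2-3",
            {"7c2eaff6", "copy-data"}:       "cpus 4-5",
        }
        active := map[string]bool{"7c2eaff6": true} // 9b786c99 was deleted
        for _, k := range removeStaleState(state, active) {
            fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", k.podUID, k.container)
        }
    }
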
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.094156 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.112328 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwrxk\" (UniqueName: \"kubernetes.io/projected/92ab5ae3-5831-41cd-902c-2744583a5708-kube-api-access-kwrxk\") pod \"mariadb-client\" (UID: \"92ab5ae3-5831-41cd-902c-2744583a5708\") " pod="openstack/mariadb-client"
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.213327 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwrxk\" (UniqueName: \"kubernetes.io/projected/92ab5ae3-5831-41cd-902c-2744583a5708-kube-api-access-kwrxk\") pod \"mariadb-client\" (UID: \"92ab5ae3-5831-41cd-902c-2744583a5708\") " pod="openstack/mariadb-client"
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.244144 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwrxk\" (UniqueName: \"kubernetes.io/projected/92ab5ae3-5831-41cd-902c-2744583a5708-kube-api-access-kwrxk\") pod \"mariadb-client\" (UID: \"92ab5ae3-5831-41cd-902c-2744583a5708\") " pod="openstack/mariadb-client"
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.408791 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.469670 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x9vdp"
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.562516 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x9vdp"]
Jan 29 08:02:37 crc kubenswrapper[4861]: I0129 08:02:37.896436 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Jan 29 08:02:38 crc kubenswrapper[4861]: I0129 08:02:38.419922 4861 generic.go:334] "Generic (PLEG): container finished" podID="92ab5ae3-5831-41cd-902c-2744583a5708" containerID="74cfa304a062c06376023b0dc73208636f05cb2d84b31845ae51644115b57b51" exitCode=0
Jan 29 08:02:38 crc kubenswrapper[4861]: I0129 08:02:38.419999 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"92ab5ae3-5831-41cd-902c-2744583a5708","Type":"ContainerDied","Data":"74cfa304a062c06376023b0dc73208636f05cb2d84b31845ae51644115b57b51"}
Jan 29 08:02:38 crc kubenswrapper[4861]: I0129 08:02:38.420392 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"92ab5ae3-5831-41cd-902c-2744583a5708","Type":"ContainerStarted","Data":"2dc85de12caf84dd4089dcfe4388507184779ccb7d7d6c5de562b5b7fcaa0150"}
Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.430865 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-x9vdp" podUID="e65b836e-ea8e-44e6-aca9-eecc977f87af" containerName="registry-server" containerID="cri-o://83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358" gracePeriod=2
Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.795128 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.825757 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_92ab5ae3-5831-41cd-902c-2744583a5708/mariadb-client/0.log" Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.851313 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.864232 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.965786 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwrxk\" (UniqueName: \"kubernetes.io/projected/92ab5ae3-5831-41cd-902c-2744583a5708-kube-api-access-kwrxk\") pod \"92ab5ae3-5831-41cd-902c-2744583a5708\" (UID: \"92ab5ae3-5831-41cd-902c-2744583a5708\") " Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.973857 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92ab5ae3-5831-41cd-902c-2744583a5708-kube-api-access-kwrxk" (OuterVolumeSpecName: "kube-api-access-kwrxk") pod "92ab5ae3-5831-41cd-902c-2744583a5708" (UID: "92ab5ae3-5831-41cd-902c-2744583a5708"). InnerVolumeSpecName "kube-api-access-kwrxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.979390 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Jan 29 08:02:39 crc kubenswrapper[4861]: E0129 08:02:39.979992 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92ab5ae3-5831-41cd-902c-2744583a5708" containerName="mariadb-client" Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.980019 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="92ab5ae3-5831-41cd-902c-2744583a5708" containerName="mariadb-client" Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.980288 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="92ab5ae3-5831-41cd-902c-2744583a5708" containerName="mariadb-client" Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.980717 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x9vdp" Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.981119 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 29 08:02:39 crc kubenswrapper[4861]: I0129 08:02:39.989371 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.070438 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5vcx\" (UniqueName: \"kubernetes.io/projected/2ed110c4-c369-4e64-be13-4fe2edd06901-kube-api-access-j5vcx\") pod \"mariadb-client\" (UID: \"2ed110c4-c369-4e64-be13-4fe2edd06901\") " pod="openstack/mariadb-client" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.072448 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwrxk\" (UniqueName: \"kubernetes.io/projected/92ab5ae3-5831-41cd-902c-2744583a5708-kube-api-access-kwrxk\") on node \"crc\" DevicePath \"\"" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.174063 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-catalog-content\") pod \"e65b836e-ea8e-44e6-aca9-eecc977f87af\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.174141 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7lq5\" (UniqueName: \"kubernetes.io/projected/e65b836e-ea8e-44e6-aca9-eecc977f87af-kube-api-access-q7lq5\") pod \"e65b836e-ea8e-44e6-aca9-eecc977f87af\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.174179 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-utilities\") pod \"e65b836e-ea8e-44e6-aca9-eecc977f87af\" (UID: \"e65b836e-ea8e-44e6-aca9-eecc977f87af\") " Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.174603 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5vcx\" (UniqueName: \"kubernetes.io/projected/2ed110c4-c369-4e64-be13-4fe2edd06901-kube-api-access-j5vcx\") pod \"mariadb-client\" (UID: \"2ed110c4-c369-4e64-be13-4fe2edd06901\") " pod="openstack/mariadb-client" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.176172 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-utilities" (OuterVolumeSpecName: "utilities") pod "e65b836e-ea8e-44e6-aca9-eecc977f87af" (UID: "e65b836e-ea8e-44e6-aca9-eecc977f87af"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.179127 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e65b836e-ea8e-44e6-aca9-eecc977f87af-kube-api-access-q7lq5" (OuterVolumeSpecName: "kube-api-access-q7lq5") pod "e65b836e-ea8e-44e6-aca9-eecc977f87af" (UID: "e65b836e-ea8e-44e6-aca9-eecc977f87af"). InnerVolumeSpecName "kube-api-access-q7lq5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.196558 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5vcx\" (UniqueName: \"kubernetes.io/projected/2ed110c4-c369-4e64-be13-4fe2edd06901-kube-api-access-j5vcx\") pod \"mariadb-client\" (UID: \"2ed110c4-c369-4e64-be13-4fe2edd06901\") " pod="openstack/mariadb-client" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.248309 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e65b836e-ea8e-44e6-aca9-eecc977f87af" (UID: "e65b836e-ea8e-44e6-aca9-eecc977f87af"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.277197 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.277241 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7lq5\" (UniqueName: \"kubernetes.io/projected/e65b836e-ea8e-44e6-aca9-eecc977f87af-kube-api-access-q7lq5\") on node \"crc\" DevicePath \"\"" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.277261 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e65b836e-ea8e-44e6-aca9-eecc977f87af-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.294714 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.468421 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.469216 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2dc85de12caf84dd4089dcfe4388507184779ccb7d7d6c5de562b5b7fcaa0150" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.496573 4861 generic.go:334] "Generic (PLEG): container finished" podID="e65b836e-ea8e-44e6-aca9-eecc977f87af" containerID="83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358" exitCode=0 Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.497012 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9vdp" event={"ID":"e65b836e-ea8e-44e6-aca9-eecc977f87af","Type":"ContainerDied","Data":"83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358"} Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.497063 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9vdp" event={"ID":"e65b836e-ea8e-44e6-aca9-eecc977f87af","Type":"ContainerDied","Data":"c99db22006e1aa32db4ddca4bc7404183420163392117ed228f825ff13509e0f"} Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.497104 4861 scope.go:117] "RemoveContainer" containerID="83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358" Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.497405 4861 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.520817 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/mariadb-client" oldPodUID="92ab5ae3-5831-41cd-902c-2744583a5708" podUID="2ed110c4-c369-4e64-be13-4fe2edd06901"
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.579749 4861 scope.go:117] "RemoveContainer" containerID="5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad"
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.612018 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x9vdp"]
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.649375 4861 scope.go:117] "RemoveContainer" containerID="6b06731e9e0e54f17647bcd3ea10cbcd6f4cf82ef7ecec693823e1ebda72f62f"
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.659489 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-x9vdp"]
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.685014 4861 scope.go:117] "RemoveContainer" containerID="83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358"
Jan 29 08:02:40 crc kubenswrapper[4861]: E0129 08:02:40.685644 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358\": container with ID starting with 83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358 not found: ID does not exist" containerID="83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358"
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.685667 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358"} err="failed to get container status \"83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358\": rpc error: code = NotFound desc = could not find container \"83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358\": container with ID starting with 83eb808b66a42e6fd3a2b6605a1c7f93794726e12d6fb79f03ed5dce56cb9358 not found: ID does not exist"
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.685685 4861 scope.go:117] "RemoveContainer" containerID="5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad"
Jan 29 08:02:40 crc kubenswrapper[4861]: E0129 08:02:40.685946 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad\": container with ID starting with 5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad not found: ID does not exist" containerID="5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad"
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.685965 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad"} err="failed to get container status \"5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad\": rpc error: code = NotFound desc = could not find container \"5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad\": container with ID starting with 5753e26f045f620089af77901af7e8e05955584729837c90bd2ddd4b29c713ad not found: ID does not exist"
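
Note: each "RemoveContainer" above draws a NotFound from the runtime and a "DeleteContainer returned error" line, yet cleanup proceeds: CRI-O had already pruned these containers along with their sandbox, so deletion only has to be idempotent. (The status_manager line likewise skips a stale update because the recreated mariadb-client pod carries a new UID.) A sketch of NotFound-tolerant removal, with a sentinel error standing in for the runtime's gRPC NotFound status:

    package main

    import (
        "errors"
        "fmt"
    )

    // errNotFound stands in for the CRI runtime's NotFound status in the log.
    var errNotFound = errors.New("NotFound: ID does not exist")

    // removeContainer tolerates NotFound: a container that is already gone is
    // treated as removed, which is why the sync loop above logs the error and
    // still makes progress.
    func removeContainer(id string, rpc func(string) error) error {
        err := rpc(id)
        if err != nil && !errors.Is(err, errNotFound) {
            return fmt.Errorf("remove %s: %w", id, err)
        }
        return nil
    }

    func main() {
        gone := func(id string) error {
            return fmt.Errorf("could not find container %q: %w", id, errNotFound)
        }
        fmt.Println(removeContainer("83eb808b", gone)) // <nil>: NotFound swallowed
    }
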
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.685977 4861 scope.go:117] "RemoveContainer" containerID="6b06731e9e0e54f17647bcd3ea10cbcd6f4cf82ef7ecec693823e1ebda72f62f"
Jan 29 08:02:40 crc kubenswrapper[4861]: E0129 08:02:40.686334 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b06731e9e0e54f17647bcd3ea10cbcd6f4cf82ef7ecec693823e1ebda72f62f\": container with ID starting with 6b06731e9e0e54f17647bcd3ea10cbcd6f4cf82ef7ecec693823e1ebda72f62f not found: ID does not exist" containerID="6b06731e9e0e54f17647bcd3ea10cbcd6f4cf82ef7ecec693823e1ebda72f62f"
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.686352 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b06731e9e0e54f17647bcd3ea10cbcd6f4cf82ef7ecec693823e1ebda72f62f"} err="failed to get container status \"6b06731e9e0e54f17647bcd3ea10cbcd6f4cf82ef7ecec693823e1ebda72f62f\": rpc error: code = NotFound desc = could not find container \"6b06731e9e0e54f17647bcd3ea10cbcd6f4cf82ef7ecec693823e1ebda72f62f\": container with ID starting with 6b06731e9e0e54f17647bcd3ea10cbcd6f4cf82ef7ecec693823e1ebda72f62f not found: ID does not exist"
Jan 29 08:02:40 crc kubenswrapper[4861]: I0129 08:02:40.933563 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Jan 29 08:02:40 crc kubenswrapper[4861]: W0129 08:02:40.944410 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ed110c4_c369_4e64_be13_4fe2edd06901.slice/crio-92c8c76fddc837d2a7e89dcd7b68d4d176b9da4817c3e7e018e0ec6bd7cac378 WatchSource:0}: Error finding container 92c8c76fddc837d2a7e89dcd7b68d4d176b9da4817c3e7e018e0ec6bd7cac378: Status 404 returned error can't find the container with id 92c8c76fddc837d2a7e89dcd7b68d4d176b9da4817c3e7e018e0ec6bd7cac378
Jan 29 08:02:41 crc kubenswrapper[4861]: I0129 08:02:41.126354 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92ab5ae3-5831-41cd-902c-2744583a5708" path="/var/lib/kubelet/pods/92ab5ae3-5831-41cd-902c-2744583a5708/volumes"
Jan 29 08:02:41 crc kubenswrapper[4861]: I0129 08:02:41.126923 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e65b836e-ea8e-44e6-aca9-eecc977f87af" path="/var/lib/kubelet/pods/e65b836e-ea8e-44e6-aca9-eecc977f87af/volumes"
Jan 29 08:02:41 crc kubenswrapper[4861]: I0129 08:02:41.510450 4861 generic.go:334] "Generic (PLEG): container finished" podID="2ed110c4-c369-4e64-be13-4fe2edd06901" containerID="7557e980de1a6dd37a28f9e5356b5275fffd3a7069bf200f2af285d26a7cdc2b" exitCode=0
Jan 29 08:02:41 crc kubenswrapper[4861]: I0129 08:02:41.510534 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"2ed110c4-c369-4e64-be13-4fe2edd06901","Type":"ContainerDied","Data":"7557e980de1a6dd37a28f9e5356b5275fffd3a7069bf200f2af285d26a7cdc2b"}
Jan 29 08:02:41 crc kubenswrapper[4861]: I0129 08:02:41.510903 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"2ed110c4-c369-4e64-be13-4fe2edd06901","Type":"ContainerStarted","Data":"92c8c76fddc837d2a7e89dcd7b68d4d176b9da4817c3e7e018e0ec6bd7cac378"}
Jan 29 08:02:42 crc kubenswrapper[4861]: I0129 08:02:42.844085 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Jan 29 08:02:42 crc kubenswrapper[4861]: I0129 08:02:42.912509 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_2ed110c4-c369-4e64-be13-4fe2edd06901/mariadb-client/0.log"
Jan 29 08:02:42 crc kubenswrapper[4861]: I0129 08:02:42.939882 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"]
Jan 29 08:02:42 crc kubenswrapper[4861]: I0129 08:02:42.947235 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"]
Jan 29 08:02:43 crc kubenswrapper[4861]: I0129 08:02:43.024945 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5vcx\" (UniqueName: \"kubernetes.io/projected/2ed110c4-c369-4e64-be13-4fe2edd06901-kube-api-access-j5vcx\") pod \"2ed110c4-c369-4e64-be13-4fe2edd06901\" (UID: \"2ed110c4-c369-4e64-be13-4fe2edd06901\") "
Jan 29 08:02:43 crc kubenswrapper[4861]: I0129 08:02:43.031115 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ed110c4-c369-4e64-be13-4fe2edd06901-kube-api-access-j5vcx" (OuterVolumeSpecName: "kube-api-access-j5vcx") pod "2ed110c4-c369-4e64-be13-4fe2edd06901" (UID: "2ed110c4-c369-4e64-be13-4fe2edd06901"). InnerVolumeSpecName "kube-api-access-j5vcx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:02:43 crc kubenswrapper[4861]: I0129 08:02:43.127448 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5vcx\" (UniqueName: \"kubernetes.io/projected/2ed110c4-c369-4e64-be13-4fe2edd06901-kube-api-access-j5vcx\") on node \"crc\" DevicePath \"\""
Jan 29 08:02:43 crc kubenswrapper[4861]: I0129 08:02:43.135350 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ed110c4-c369-4e64-be13-4fe2edd06901" path="/var/lib/kubelet/pods/2ed110c4-c369-4e64-be13-4fe2edd06901/volumes"
Jan 29 08:02:43 crc kubenswrapper[4861]: I0129 08:02:43.531175 4861 scope.go:117] "RemoveContainer" containerID="7557e980de1a6dd37a28f9e5356b5275fffd3a7069bf200f2af285d26a7cdc2b"
Jan 29 08:02:43 crc kubenswrapper[4861]: I0129 08:02:43.531341 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
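
Note: "Cleaned up orphaned pod volumes dir" above is the kubelet's housekeeping pass over /var/lib/kubelet/pods: once a deleted pod's volumes are torn down, the per-UID directory is removed so nothing leaks across recreations. A simplified sketch of that sweep (paths and types illustrative, and without the kubelet's checks for still-mounted volumes):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // cleanupOrphanedPodDirs removes per-pod volume directories whose UID is
    // not in the set of known pods -- the same shape as kubelet_volumes.go's
    // cleanup, minus the safety checks.
    func cleanupOrphanedPodDirs(root string, known map[string]bool) error {
        entries, err := os.ReadDir(root)
        if err != nil {
            return err
        }
        for _, e := range entries {
            if !e.IsDir() || known[e.Name()] {
                continue
            }
            dir := filepath.Join(root, e.Name(), "volumes")
            if err := os.RemoveAll(dir); err != nil {
                return err
            }
            fmt.Printf("Cleaned up orphaned pod volumes dir podUID=%q path=%q\n", e.Name(), dir)
        }
        return nil
    }

    func main() {
        root, _ := os.MkdirTemp("", "pods")
        defer os.RemoveAll(root)
        _ = os.MkdirAll(filepath.Join(root, "2ed110c4", "volumes"), 0o755)
        _ = cleanupOrphanedPodDirs(root, map[string]bool{}) // no pods known: prune all
    }
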
Jan 29 08:02:50 crc kubenswrapper[4861]: I0129 08:02:50.116800 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e"
Jan 29 08:02:50 crc kubenswrapper[4861]: E0129 08:02:50.117501 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:03:01 crc kubenswrapper[4861]: I0129 08:03:01.117167 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e"
Jan 29 08:03:01 crc kubenswrapper[4861]: E0129 08:03:01.118333 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:03:12 crc kubenswrapper[4861]: E0129 08:03:12.629779 4861 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.80:59208->38.102.83.80:46667: write tcp 38.102.83.80:59208->38.102.83.80:46667: write: broken pipe
Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.914936 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"]
Jan 29 08:03:15 crc kubenswrapper[4861]: E0129 08:03:15.915871 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ed110c4-c369-4e64-be13-4fe2edd06901" containerName="mariadb-client"
Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.915902 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ed110c4-c369-4e64-be13-4fe2edd06901" containerName="mariadb-client"
Jan 29 08:03:15 crc kubenswrapper[4861]: E0129 08:03:15.915946 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e65b836e-ea8e-44e6-aca9-eecc977f87af" containerName="extract-content"
Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.915962 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e65b836e-ea8e-44e6-aca9-eecc977f87af" containerName="extract-content"
Jan 29 08:03:15 crc kubenswrapper[4861]: E0129 08:03:15.916011 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e65b836e-ea8e-44e6-aca9-eecc977f87af" containerName="extract-utilities"
Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.916026 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e65b836e-ea8e-44e6-aca9-eecc977f87af" containerName="extract-utilities"
Jan 29 08:03:15 crc kubenswrapper[4861]: E0129 08:03:15.916053 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e65b836e-ea8e-44e6-aca9-eecc977f87af" containerName="registry-server"
Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.916069 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e65b836e-ea8e-44e6-aca9-eecc977f87af" containerName="registry-server"
Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.916548 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ed110c4-c369-4e64-be13-4fe2edd06901" containerName="mariadb-client"
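
Note: the upgradeaware.go error above comes from the kubelet's streaming proxy; exec, attach, and port-forward traffic rides an upgraded connection whose bytes the kubelet copies between client and backend, and a peer hanging up mid-copy surfaces as exactly this "write: broken pipe". A stripped-down sketch of that bidirectional copy (the pairing of pipes in main is purely illustrative):

    package main

    import (
        "io"
        "log"
        "net"
    )

    // proxy shuttles bytes both ways between an upgraded client connection and
    // the backend. It waits for both directions; on the first error it logs
    // and tears the tunnel down, the way the handler above reports a peer that
    // vanished mid-stream.
    func proxy(client, backend net.Conn) {
        done := make(chan error, 2)
        go func() { _, err := io.Copy(backend, client); done <- err }()
        go func() { _, err := io.Copy(client, backend); done <- err }()
        for i := 0; i < 2; i++ {
            if err := <-done; err != nil {
                log.Printf("Error proxying data from client to backend: %v", err)
                client.Close()
                backend.Close()
            }
        }
    }

    func main() {
        clientSide, clientConn := net.Pipe()
        backendConn, backendSide := net.Pipe()
        go backendSide.Close()                 // backend drops the connection early
        go clientSide.Write([]byte("payload")) // client is still sending
        proxy(clientConn, backendConn)         // logs a closed-pipe error and exits
    }
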
podUID="2ed110c4-c369-4e64-be13-4fe2edd06901" containerName="mariadb-client" Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.916583 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e65b836e-ea8e-44e6-aca9-eecc977f87af" containerName="registry-server" Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.918574 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.922012 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.922794 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-7c6jl" Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.923172 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.925216 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.926034 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.947418 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.950176 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.960100 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.969029 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.970602 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 08:03:15 crc kubenswrapper[4861]: I0129 08:03:15.984739 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"]
Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.003835 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"]
Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.049209 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0"
Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.049288 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0"
Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.049319 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-89a3f800-8c72-4424-ad14-cf626e5e2266\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-89a3f800-8c72-4424-ad14-cf626e5e2266\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0"
Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.049346 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0"
Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.049442 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0"
Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.049464 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-config\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0"
Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.049490 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ps9sw\" (UniqueName: \"kubernetes.io/projected/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-kube-api-access-ps9sw\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0"
Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.049515 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0"
Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 
08:03:16.116707 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:03:16 crc kubenswrapper[4861]: E0129 08:03:16.117406 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151140 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a9e304d1-7510-4764-915b-9a9d44d43587-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151206 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlvmp\" (UniqueName: \"kubernetes.io/projected/a9e304d1-7510-4764-915b-9a9d44d43587-kube-api-access-wlvmp\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151263 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0c545f3d-6d43-472c-845e-aebeefa59658\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c545f3d-6d43-472c-845e-aebeefa59658\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151288 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9e304d1-7510-4764-915b-9a9d44d43587-config\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151312 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151332 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a9e304d1-7510-4764-915b-9a9d44d43587-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151362 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151394 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/a9e304d1-7510-4764-915b-9a9d44d43587-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151417 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151447 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151473 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151499 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-89a3f800-8c72-4424-ad14-cf626e5e2266\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-89a3f800-8c72-4424-ad14-cf626e5e2266\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151525 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151562 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqzth\" (UniqueName: \"kubernetes.io/projected/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-kube-api-access-lqzth\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151587 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151607 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-config\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151632 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-config\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " 
pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151653 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9e304d1-7510-4764-915b-9a9d44d43587-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151677 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ps9sw\" (UniqueName: \"kubernetes.io/projected/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-kube-api-access-ps9sw\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151702 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-90599a0f-b0ba-4308-88ab-b81d2bdd4745\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90599a0f-b0ba-4308-88ab-b81d2bdd4745\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151729 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151767 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151791 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.151813 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9e304d1-7510-4764-915b-9a9d44d43587-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.153146 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.153549 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-config\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.153953 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.156184 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.156277 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-89a3f800-8c72-4424-ad14-cf626e5e2266\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-89a3f800-8c72-4424-ad14-cf626e5e2266\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b5688805dcebc9b9f5035b61afb0923b4c9d354af8ce2c4cb29005517108df01/globalmount\"" pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.159090 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.160156 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.160795 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.169306 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ps9sw\" (UniqueName: \"kubernetes.io/projected/36c95ecd-713c-48cd-9a1d-3d4d7619f73a-kube-api-access-ps9sw\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.201094 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-89a3f800-8c72-4424-ad14-cf626e5e2266\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-89a3f800-8c72-4424-ad14-cf626e5e2266\") pod \"ovsdbserver-nb-0\" (UID: \"36c95ecd-713c-48cd-9a1d-3d4d7619f73a\") " pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.249931 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.253303 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqzth\" (UniqueName: \"kubernetes.io/projected/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-kube-api-access-lqzth\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.253747 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-config\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.254863 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-config\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.254924 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9e304d1-7510-4764-915b-9a9d44d43587-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.254956 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-90599a0f-b0ba-4308-88ab-b81d2bdd4745\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90599a0f-b0ba-4308-88ab-b81d2bdd4745\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255014 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255037 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255056 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9e304d1-7510-4764-915b-9a9d44d43587-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255214 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a9e304d1-7510-4764-915b-9a9d44d43587-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255254 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlvmp\" (UniqueName: 
\"kubernetes.io/projected/a9e304d1-7510-4764-915b-9a9d44d43587-kube-api-access-wlvmp\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255303 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0c545f3d-6d43-472c-845e-aebeefa59658\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c545f3d-6d43-472c-845e-aebeefa59658\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255337 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9e304d1-7510-4764-915b-9a9d44d43587-config\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255361 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255382 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a9e304d1-7510-4764-915b-9a9d44d43587-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255427 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9e304d1-7510-4764-915b-9a9d44d43587-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255450 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.255476 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.256947 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a9e304d1-7510-4764-915b-9a9d44d43587-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.257290 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 
08:03:16.257521 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.257562 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a9e304d1-7510-4764-915b-9a9d44d43587-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.259101 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9e304d1-7510-4764-915b-9a9d44d43587-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.259512 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9e304d1-7510-4764-915b-9a9d44d43587-config\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.261241 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9e304d1-7510-4764-915b-9a9d44d43587-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.262051 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.263349 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
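
The entries above show kubelet's volume manager walking every volume of the ovsdbserver-nb pods through the same flow that the source files name: reconciler_common.go starts VerifyControllerAttachedVolume and MountVolume, and operation_generator.go reports MountVolume.SetUp succeeded once each configmap, secret, projected-token, empty-dir, or CSI volume is in place. The csi_attacher line means the kubevirt.io.hostpath-provisioner driver does not advertise the STAGE_UNSTAGE_VOLUME capability, so kubelet skips the NodeStageVolume call and proceeds straight to per-pod SetUp. A minimal Go sketch for tallying these transitions per pod follows (the file name and program are illustrative, not part of this capture; it assumes one journald entry per line, as kubelet emits them):

// mount_summary.go — illustrative helper, not part of this log capture.
// Tallies "MountVolume.SetUp succeeded" kubelet entries per pod.
// Usage: go run mount_summary.go < kubelet.log
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches the escaped volume name inside the klog message and the
// structured pod="namespace/name" field that closes each entry above.
var setupOK = regexp.MustCompile(`MountVolume\.SetUp succeeded for volume \\"([^\\"]+)\\".*pod="([^"]+)"`)

func main() {
	mounted := map[string][]string{} // pod -> volumes mounted so far
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 64*1024), 1024*1024) // kubelet lines run long
	for sc.Scan() {
		if m := setupOK.FindStringSubmatch(sc.Text()); m != nil {
			mounted[m[2]] = append(mounted[m[2]], m[1])
		}
	}
	for pod, vols := range mounted {
		fmt.Printf("%-35s %d volumes: %v\n", pod, len(vols), vols)
	}
}

Run over this capture, it groups the config, scripts, TLS-cert, CA-bundle, rundir, API-token, and PVC volumes under each ovsdbserver pod, which makes a volume whose SetUp never succeeds easy to spot.
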
Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.263402 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0c545f3d-6d43-472c-845e-aebeefa59658\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c545f3d-6d43-472c-845e-aebeefa59658\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/eee4a3f7150ddf3f3cfa9d481c81967e8f5dd1550e1121401735007831f760c7/globalmount\"" pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.263420 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.263866 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9e304d1-7510-4764-915b-9a9d44d43587-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.263989 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.264042 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-90599a0f-b0ba-4308-88ab-b81d2bdd4745\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90599a0f-b0ba-4308-88ab-b81d2bdd4745\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/4ca3a537ed5da6dd4f365e600c470feaf3d5f7dd9a824b954cc9e61791545b57/globalmount\"" pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.267505 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.276633 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqzth\" (UniqueName: \"kubernetes.io/projected/ac0e53a1-0e72-4889-9306-50b1b0f60ee0-kube-api-access-lqzth\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.284428 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlvmp\" (UniqueName: \"kubernetes.io/projected/a9e304d1-7510-4764-915b-9a9d44d43587-kube-api-access-wlvmp\") pod \"ovsdbserver-nb-1\" (UID: \"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.338393 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0c545f3d-6d43-472c-845e-aebeefa59658\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c545f3d-6d43-472c-845e-aebeefa59658\") pod \"ovsdbserver-nb-1\" (UID: 
\"a9e304d1-7510-4764-915b-9a9d44d43587\") " pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.363243 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-90599a0f-b0ba-4308-88ab-b81d2bdd4745\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90599a0f-b0ba-4308-88ab-b81d2bdd4745\") pod \"ovsdbserver-nb-2\" (UID: \"ac0e53a1-0e72-4889-9306-50b1b0f60ee0\") " pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.591668 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.601354 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:16 crc kubenswrapper[4861]: I0129 08:03:16.911679 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.005805 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.217530 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 29 08:03:17 crc kubenswrapper[4861]: W0129 08:03:17.233808 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac0e53a1_0e72_4889_9306_50b1b0f60ee0.slice/crio-e5da089fa9a8245139df1414f95e80dfa6e0df7c166a51aa0048acc8f9748091 WatchSource:0}: Error finding container e5da089fa9a8245139df1414f95e80dfa6e0df7c166a51aa0048acc8f9748091: Status 404 returned error can't find the container with id e5da089fa9a8245139df1414f95e80dfa6e0df7c166a51aa0048acc8f9748091 Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.420373 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.421731 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.424747 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.424890 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-djl8l" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.425059 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.425288 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.439895 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.481123 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.482764 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.487039 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.488295 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.502449 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.508810 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.576763 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0574e13a-4bd0-4ff5-ad2d-266144e22f3b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0574e13a-4bd0-4ff5-ad2d-266144e22f3b\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.576813 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e378828-d06c-4a89-9ca3-f134cd743d94-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.576840 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.576869 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/015bc043-2935-42e9-8259-2a2a113e30a8-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.576895 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22ksj\" (UniqueName: \"kubernetes.io/projected/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-kube-api-access-22ksj\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.576918 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/015bc043-2935-42e9-8259-2a2a113e30a8-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.576944 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xk7g7\" (UniqueName: \"kubernetes.io/projected/015bc043-2935-42e9-8259-2a2a113e30a8-kube-api-access-xk7g7\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.576959 4861 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/015bc043-2935-42e9-8259-2a2a113e30a8-config\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.576975 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e378828-d06c-4a89-9ca3-f134cd743d94-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.576989 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577005 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e378828-d06c-4a89-9ca3-f134cd743d94-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577026 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-config\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577044 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/015bc043-2935-42e9-8259-2a2a113e30a8-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577062 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-9b6ef75b-d5d5-48ed-8b83-c7bcec92c7f7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9b6ef75b-d5d5-48ed-8b83-c7bcec92c7f7\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577207 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a47d0273-6c1a-4778-919c-5b7f07c0582e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a47d0273-6c1a-4778-919c-5b7f07c0582e\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577228 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577248 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577267 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e378828-d06c-4a89-9ca3-f134cd743d94-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577283 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5b267\" (UniqueName: \"kubernetes.io/projected/0e378828-d06c-4a89-9ca3-f134cd743d94-kube-api-access-5b267\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577301 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577320 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0e378828-d06c-4a89-9ca3-f134cd743d94-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577340 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/015bc043-2935-42e9-8259-2a2a113e30a8-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577359 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e378828-d06c-4a89-9ca3-f134cd743d94-config\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.577373 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/015bc043-2935-42e9-8259-2a2a113e30a8-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.678643 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.678700 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/0e378828-d06c-4a89-9ca3-f134cd743d94-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.678724 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5b267\" (UniqueName: \"kubernetes.io/projected/0e378828-d06c-4a89-9ca3-f134cd743d94-kube-api-access-5b267\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.678746 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.678765 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0e378828-d06c-4a89-9ca3-f134cd743d94-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.679212 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/015bc043-2935-42e9-8259-2a2a113e30a8-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.679233 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.679251 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e378828-d06c-4a89-9ca3-f134cd743d94-config\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.679278 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/015bc043-2935-42e9-8259-2a2a113e30a8-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.679317 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0574e13a-4bd0-4ff5-ad2d-266144e22f3b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0574e13a-4bd0-4ff5-ad2d-266144e22f3b\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.679393 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e378828-d06c-4a89-9ca3-f134cd743d94-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " 
pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.679393 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0e378828-d06c-4a89-9ca3-f134cd743d94-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.679572 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.679715 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/015bc043-2935-42e9-8259-2a2a113e30a8-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.679921 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/015bc043-2935-42e9-8259-2a2a113e30a8-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.679982 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22ksj\" (UniqueName: \"kubernetes.io/projected/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-kube-api-access-22ksj\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680019 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/015bc043-2935-42e9-8259-2a2a113e30a8-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680088 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xk7g7\" (UniqueName: \"kubernetes.io/projected/015bc043-2935-42e9-8259-2a2a113e30a8-kube-api-access-xk7g7\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680119 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/015bc043-2935-42e9-8259-2a2a113e30a8-config\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680145 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e378828-d06c-4a89-9ca3-f134cd743d94-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680203 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-scripts\") pod 
\"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680230 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e378828-d06c-4a89-9ca3-f134cd743d94-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680270 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-config\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680298 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/015bc043-2935-42e9-8259-2a2a113e30a8-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680325 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-9b6ef75b-d5d5-48ed-8b83-c7bcec92c7f7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9b6ef75b-d5d5-48ed-8b83-c7bcec92c7f7\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680363 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a47d0273-6c1a-4778-919c-5b7f07c0582e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a47d0273-6c1a-4778-919c-5b7f07c0582e\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680390 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680615 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/015bc043-2935-42e9-8259-2a2a113e30a8-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.680992 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/015bc043-2935-42e9-8259-2a2a113e30a8-config\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.681294 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-config\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.681922 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/configmap/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.681980 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e378828-d06c-4a89-9ca3-f134cd743d94-config\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.683041 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.683914 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.683947 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-9b6ef75b-d5d5-48ed-8b83-c7bcec92c7f7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9b6ef75b-d5d5-48ed-8b83-c7bcec92c7f7\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2b5733dd8ef4ada786dee46f46c77d8d428d038c082fcc5f27096cbac5a03ec3/globalmount\"" pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.684236 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.684375 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a47d0273-6c1a-4778-919c-5b7f07c0582e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a47d0273-6c1a-4778-919c-5b7f07c0582e\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/30493ca360aad386312b2985d6235dd0101c952f223bb96394410a8d506be82e/globalmount\"" pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.684497 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e378828-d06c-4a89-9ca3-f134cd743d94-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.685011 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e378828-d06c-4a89-9ca3-f134cd743d94-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.685683 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e378828-d06c-4a89-9ca3-f134cd743d94-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.688038 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e378828-d06c-4a89-9ca3-f134cd743d94-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.689099 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/015bc043-2935-42e9-8259-2a2a113e30a8-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.691320 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.692528 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.694410 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
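
Whether the staging step runs at all is a capability handshake: kubelet calls the driver's NodeGetCapabilities and only issues NodeStageVolume/NodeUnstageVolume when STAGE_UNSTAGE_VOLUME is advertised. That is why every MountDevice above is recorded as succeeded without a driver call — the globalmount path under /var/lib/kubelet/plugins/kubernetes.io/csi/... is merely registered, and the per-pod SetUp (NodePublishVolume) does the actual mount. For contrast, a driver that wants the staging step would advertise it roughly as in this sketch against the CSI Go bindings (the nodeServer type and socket path are hypothetical; this is not the hostpath provisioner's actual code):

// csi_caps_sketch.go — hypothetical CSI node plugin showing only the
// capability handshake that kubelet's csi_attacher checks.
package main

import (
	"context"
	"log"
	"net"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
)

type nodeServer struct {
	csi.UnimplementedNodeServer // default "unimplemented" RPC stubs
}

// NodeGetCapabilities advertises STAGE_UNSTAGE_VOLUME. Omit this entry and
// kubelet logs "STAGE_UNSTAGE_VOLUME capability not set. Skipping
// MountDevice..." (as above) instead of calling NodeStageVolume.
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: []*csi.NodeServiceCapability{{
			Type: &csi.NodeServiceCapability_Rpc{
				Rpc: &csi.NodeServiceCapability_RPC{
					Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
				},
			},
		}},
	}, nil
}

func main() {
	lis, err := net.Listen("unix", "/tmp/csi-sketch.sock") // illustrative socket
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	csi.RegisterNodeServer(srv, &nodeServer{})
	log.Fatal(srv.Serve(lis))
}
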
Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.694458 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0574e13a-4bd0-4ff5-ad2d-266144e22f3b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0574e13a-4bd0-4ff5-ad2d-266144e22f3b\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fc6007cd7f436fd9ec1a1dc69ebf89a93cb3f489439c5ed690a70750ae6fc7f3/globalmount\"" pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.694781 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5b267\" (UniqueName: \"kubernetes.io/projected/0e378828-d06c-4a89-9ca3-f134cd743d94-kube-api-access-5b267\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.697676 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/015bc043-2935-42e9-8259-2a2a113e30a8-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.699174 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xk7g7\" (UniqueName: \"kubernetes.io/projected/015bc043-2935-42e9-8259-2a2a113e30a8-kube-api-access-xk7g7\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.700739 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/015bc043-2935-42e9-8259-2a2a113e30a8-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.712384 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22ksj\" (UniqueName: \"kubernetes.io/projected/8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe-kube-api-access-22ksj\") pod \"ovsdbserver-sb-1\" (UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.718219 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-9b6ef75b-d5d5-48ed-8b83-c7bcec92c7f7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9b6ef75b-d5d5-48ed-8b83-c7bcec92c7f7\") pod \"ovsdbserver-sb-0\" (UID: \"015bc043-2935-42e9-8259-2a2a113e30a8\") " pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.725629 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a47d0273-6c1a-4778-919c-5b7f07c0582e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a47d0273-6c1a-4778-919c-5b7f07c0582e\") pod \"ovsdbserver-sb-2\" (UID: \"0e378828-d06c-4a89-9ca3-f134cd743d94\") " pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.736480 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0574e13a-4bd0-4ff5-ad2d-266144e22f3b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0574e13a-4bd0-4ff5-ad2d-266144e22f3b\") pod \"ovsdbserver-sb-1\" 
(UID: \"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe\") " pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.739276 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.805280 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.818979 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.870176 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"36c95ecd-713c-48cd-9a1d-3d4d7619f73a","Type":"ContainerStarted","Data":"a4ce0c3c001b35d81f3a9e8c7042e9333d024e195e222a39787f392e7259c00c"} Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.870213 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"36c95ecd-713c-48cd-9a1d-3d4d7619f73a","Type":"ContainerStarted","Data":"b2e2579dab4a191b1e1e34161eb44be84ae0a40bc7b7ed7edbe50f0e0a377c3c"} Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.870222 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"36c95ecd-713c-48cd-9a1d-3d4d7619f73a","Type":"ContainerStarted","Data":"2795f27612f7df3f3f8a49ca8b9a6798ab1fed26045054a265ffa1d149915fc1"} Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.873060 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"a9e304d1-7510-4764-915b-9a9d44d43587","Type":"ContainerStarted","Data":"a5103309883c60a9c27427db0ad193789e229e8063ae7c8bb60adb6ea34bb1ef"} Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.873115 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"a9e304d1-7510-4764-915b-9a9d44d43587","Type":"ContainerStarted","Data":"7d9cb6e5a85cf19e5a729300adeff776c44f77fd643b573e5a903ec5026f809b"} Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.873125 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"a9e304d1-7510-4764-915b-9a9d44d43587","Type":"ContainerStarted","Data":"387c44f43a61dd000fd52d3e778ccfe196acc91a0526155dba5c56357ea9c272"} Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.876314 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"ac0e53a1-0e72-4889-9306-50b1b0f60ee0","Type":"ContainerStarted","Data":"da8744d8a13abd4b9e50d48c5abf414817ecc52b0b5abf66baaebf02b34b4ac9"} Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.876343 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"ac0e53a1-0e72-4889-9306-50b1b0f60ee0","Type":"ContainerStarted","Data":"c7539302e8af2dac054d01d805b01c822e131c6fee50c8878b7f98f03e9faa00"} Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.876354 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"ac0e53a1-0e72-4889-9306-50b1b0f60ee0","Type":"ContainerStarted","Data":"e5da089fa9a8245139df1414f95e80dfa6e0df7c166a51aa0048acc8f9748091"} Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.895163 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=3.895146387 
podStartE2EDuration="3.895146387s" podCreationTimestamp="2026-01-29 08:03:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:17.89105196 +0000 UTC m=+5289.562546517" watchObservedRunningTime="2026-01-29 08:03:17.895146387 +0000 UTC m=+5289.566640934" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.926650 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=3.926627162 podStartE2EDuration="3.926627162s" podCreationTimestamp="2026-01-29 08:03:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:17.915533641 +0000 UTC m=+5289.587028218" watchObservedRunningTime="2026-01-29 08:03:17.926627162 +0000 UTC m=+5289.598121729" Jan 29 08:03:17 crc kubenswrapper[4861]: I0129 08:03:17.945418 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=3.945396884 podStartE2EDuration="3.945396884s" podCreationTimestamp="2026-01-29 08:03:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:17.943968166 +0000 UTC m=+5289.615462723" watchObservedRunningTime="2026-01-29 08:03:17.945396884 +0000 UTC m=+5289.616891431" Jan 29 08:03:18 crc kubenswrapper[4861]: I0129 08:03:18.371144 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 08:03:18 crc kubenswrapper[4861]: W0129 08:03:18.375024 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod015bc043_2935_42e9_8259_2a2a113e30a8.slice/crio-04b51fbc771572c64e231d8d4dd1ac9b8a91fde0b8ac79341fe1b5d125c45e80 WatchSource:0}: Error finding container 04b51fbc771572c64e231d8d4dd1ac9b8a91fde0b8ac79341fe1b5d125c45e80: Status 404 returned error can't find the container with id 04b51fbc771572c64e231d8d4dd1ac9b8a91fde0b8ac79341fe1b5d125c45e80 Jan 29 08:03:18 crc kubenswrapper[4861]: I0129 08:03:18.479213 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 29 08:03:18 crc kubenswrapper[4861]: W0129 08:03:18.485903 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8dd96bd8_e0b0_4c8a_98d0_2da90a7e6ebe.slice/crio-4ac756588344542cefcb8cff86c545f325285c983d5d931b11f9e81e5dbd593f WatchSource:0}: Error finding container 4ac756588344542cefcb8cff86c545f325285c983d5d931b11f9e81e5dbd593f: Status 404 returned error can't find the container with id 4ac756588344542cefcb8cff86c545f325285c983d5d931b11f9e81e5dbd593f Jan 29 08:03:18 crc kubenswrapper[4861]: I0129 08:03:18.886521 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"015bc043-2935-42e9-8259-2a2a113e30a8","Type":"ContainerStarted","Data":"768412d32c6896c1a16f9a6dd2eae0ab7f752583b4fbedd9c0410e7343af2013"} Jan 29 08:03:18 crc kubenswrapper[4861]: I0129 08:03:18.886562 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"015bc043-2935-42e9-8259-2a2a113e30a8","Type":"ContainerStarted","Data":"b55c97a1bfbb3b6fe46c18152c7224112909cbb0d8c810c1b6c037819a968bf9"} Jan 29 08:03:18 crc kubenswrapper[4861]: I0129 08:03:18.886571 4861 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"015bc043-2935-42e9-8259-2a2a113e30a8","Type":"ContainerStarted","Data":"04b51fbc771572c64e231d8d4dd1ac9b8a91fde0b8ac79341fe1b5d125c45e80"} Jan 29 08:03:18 crc kubenswrapper[4861]: I0129 08:03:18.888994 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe","Type":"ContainerStarted","Data":"0873e953a52fee0da85167a383e1a3e5990dc1a3550dbb87d6491e797cdf962a"} Jan 29 08:03:18 crc kubenswrapper[4861]: I0129 08:03:18.889054 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe","Type":"ContainerStarted","Data":"0a9c67426dd9dd418309b00358a36cd7a001d3cd9ea531c217f56b6d756d71cf"} Jan 29 08:03:18 crc kubenswrapper[4861]: I0129 08:03:18.889099 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe","Type":"ContainerStarted","Data":"4ac756588344542cefcb8cff86c545f325285c983d5d931b11f9e81e5dbd593f"} Jan 29 08:03:18 crc kubenswrapper[4861]: I0129 08:03:18.916614 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=2.916596452 podStartE2EDuration="2.916596452s" podCreationTimestamp="2026-01-29 08:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:18.9108226 +0000 UTC m=+5290.582317167" watchObservedRunningTime="2026-01-29 08:03:18.916596452 +0000 UTC m=+5290.588091009" Jan 29 08:03:18 crc kubenswrapper[4861]: I0129 08:03:18.938434 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=2.938416923 podStartE2EDuration="2.938416923s" podCreationTimestamp="2026-01-29 08:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:18.93485746 +0000 UTC m=+5290.606352017" watchObservedRunningTime="2026-01-29 08:03:18.938416923 +0000 UTC m=+5290.609911480" Jan 29 08:03:19 crc kubenswrapper[4861]: I0129 08:03:19.250431 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:19 crc kubenswrapper[4861]: I0129 08:03:19.307637 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 29 08:03:19 crc kubenswrapper[4861]: W0129 08:03:19.315014 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e378828_d06c_4a89_9ca3_f134cd743d94.slice/crio-975e48019c5706a7fa059a2d860822b12c4810c30f767992e64ac1be364d9304 WatchSource:0}: Error finding container 975e48019c5706a7fa059a2d860822b12c4810c30f767992e64ac1be364d9304: Status 404 returned error can't find the container with id 975e48019c5706a7fa059a2d860822b12c4810c30f767992e64ac1be364d9304 Jan 29 08:03:19 crc kubenswrapper[4861]: I0129 08:03:19.592203 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:19 crc kubenswrapper[4861]: I0129 08:03:19.602976 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:19 crc kubenswrapper[4861]: I0129 08:03:19.906236 4861 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"0e378828-d06c-4a89-9ca3-f134cd743d94","Type":"ContainerStarted","Data":"add1d42d7e951b253a706657a1f44ca55957cf2acef1ce8b219f4da91ac563ff"} Jan 29 08:03:19 crc kubenswrapper[4861]: I0129 08:03:19.906307 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"0e378828-d06c-4a89-9ca3-f134cd743d94","Type":"ContainerStarted","Data":"e504c61f2f5e2036ffb9d72105327868cc13dd1f84f9d9c5720f85a1a48ed14f"} Jan 29 08:03:19 crc kubenswrapper[4861]: I0129 08:03:19.906320 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"0e378828-d06c-4a89-9ca3-f134cd743d94","Type":"ContainerStarted","Data":"975e48019c5706a7fa059a2d860822b12c4810c30f767992e64ac1be364d9304"} Jan 29 08:03:19 crc kubenswrapper[4861]: I0129 08:03:19.935427 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=3.935403087 podStartE2EDuration="3.935403087s" podCreationTimestamp="2026-01-29 08:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:19.932301846 +0000 UTC m=+5291.603796423" watchObservedRunningTime="2026-01-29 08:03:19.935403087 +0000 UTC m=+5291.606897644" Jan 29 08:03:20 crc kubenswrapper[4861]: I0129 08:03:20.739710 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:20 crc kubenswrapper[4861]: I0129 08:03:20.806113 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:20 crc kubenswrapper[4861]: I0129 08:03:20.819588 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:21 crc kubenswrapper[4861]: I0129 08:03:21.250924 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:21 crc kubenswrapper[4861]: I0129 08:03:21.592856 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:21 crc kubenswrapper[4861]: I0129 08:03:21.602433 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.310657 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.354758 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.634990 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6dc855b79-rd8jn"] Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.641654 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.645935 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.650957 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6dc855b79-rd8jn"] Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.651336 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.659053 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.664620 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-config\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.664680 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-dns-svc\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.664729 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-ovsdbserver-nb\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.664755 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fm2f\" (UniqueName: \"kubernetes.io/projected/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-kube-api-access-9fm2f\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.697799 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.703907 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.741187 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.767526 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-ovsdbserver-nb\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.767569 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fm2f\" (UniqueName: \"kubernetes.io/projected/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-kube-api-access-9fm2f\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: 
\"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.769018 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-ovsdbserver-nb\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.772302 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-config\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.772395 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-dns-svc\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.773318 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-config\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.773492 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-dns-svc\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.796765 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fm2f\" (UniqueName: \"kubernetes.io/projected/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-kube-api-access-9fm2f\") pod \"dnsmasq-dns-6dc855b79-rd8jn\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") " pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.805322 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.819232 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2" Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.962927 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 08:03:22 crc kubenswrapper[4861]: I0129 08:03:22.962927 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn"
Jan 29 08:03:23 crc kubenswrapper[4861]: W0129 08:03:23.445606 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9f8dfcbc_aac4_430a_b0fb_fb57092bf898.slice/crio-bae27f1606809e94311a2ba38a262210283f32d8515b4aa74391a5bff4be28fa WatchSource:0}: Error finding container bae27f1606809e94311a2ba38a262210283f32d8515b4aa74391a5bff4be28fa: Status 404 returned error can't find the container with id bae27f1606809e94311a2ba38a262210283f32d8515b4aa74391a5bff4be28fa
Jan 29 08:03:23 crc kubenswrapper[4861]: I0129 08:03:23.451552 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6dc855b79-rd8jn"]
Jan 29 08:03:23 crc kubenswrapper[4861]: I0129 08:03:23.811016 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Jan 29 08:03:23 crc kubenswrapper[4861]: I0129 08:03:23.873289 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Jan 29 08:03:23 crc kubenswrapper[4861]: I0129 08:03:23.877694 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1"
Jan 29 08:03:23 crc kubenswrapper[4861]: I0129 08:03:23.888050 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2"
Jan 29 08:03:23 crc kubenswrapper[4861]: I0129 08:03:23.930391 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1"
Jan 29 08:03:23 crc kubenswrapper[4861]: I0129 08:03:23.950963 4861 generic.go:334] "Generic (PLEG): container finished" podID="9f8dfcbc-aac4-430a-b0fb-fb57092bf898" containerID="9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861" exitCode=0
Jan 29 08:03:23 crc kubenswrapper[4861]: I0129 08:03:23.951242 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" event={"ID":"9f8dfcbc-aac4-430a-b0fb-fb57092bf898","Type":"ContainerDied","Data":"9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861"}
Jan 29 08:03:23 crc kubenswrapper[4861]: I0129 08:03:23.952085 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" event={"ID":"9f8dfcbc-aac4-430a-b0fb-fb57092bf898","Type":"ContainerStarted","Data":"bae27f1606809e94311a2ba38a262210283f32d8515b4aa74391a5bff4be28fa"}
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.082895 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dc855b79-rd8jn"]
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.108962 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67fbd77559-nvbhp"]
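The event={...} payloads in the "SyncLoop (PLEG)" lines above are printed as JSON, so they decode with a small struct. The struct below mirrors only the fields visible in this log (an assumption, not kubelet's internal type):

    // pleg_event.go - sketch: decode a PLEG event payload as logged above.
    package main

    import (
        "encoding/json"
        "fmt"
    )

    type plegEvent struct {
        ID   string // pod UID
        Type string // ContainerStarted, ContainerDied, ...
        Data string // container or sandbox ID
    }

    func main() {
        raw := `{"ID":"9f8dfcbc-aac4-430a-b0fb-fb57092bf898","Type":"ContainerDied","Data":"9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861"}`
        var ev plegEvent
        if err := json.Unmarshal([]byte(raw), &ev); err != nil {
            panic(err)
        }
        fmt.Printf("pod %s: %s (%s...)\n", ev.ID, ev.Type, ev.Data[:12])
    }

Here the ContainerDied with exitCode=0 is the pod's init container finishing normally, followed by the sandbox (pause) container starting.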
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.111636 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.114205 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.143565 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fbd77559-nvbhp"]
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.212247 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-config\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.212334 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-dns-svc\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.212428 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-sb\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.212626 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-nb\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.212736 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp9wc\" (UniqueName: \"kubernetes.io/projected/b928b334-0553-43a5-9572-f900b2e0fafd-kube-api-access-qp9wc\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.313852 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-dns-svc\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.313913 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-sb\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.313966 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-nb\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.314022 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp9wc\" (UniqueName: \"kubernetes.io/projected/b928b334-0553-43a5-9572-f900b2e0fafd-kube-api-access-qp9wc\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.314120 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-config\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.314884 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-dns-svc\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.314960 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-nb\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.314998 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-sb\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.315017 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-config\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.338030 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp9wc\" (UniqueName: \"kubernetes.io/projected/b928b334-0553-43a5-9572-f900b2e0fafd-kube-api-access-qp9wc\") pod \"dnsmasq-dns-67fbd77559-nvbhp\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") " pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.455664 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.942192 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fbd77559-nvbhp"]
Jan 29 08:03:24 crc kubenswrapper[4861]: W0129 08:03:24.957528 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb928b334_0553_43a5_9572_f900b2e0fafd.slice/crio-eb84d9c8062a89a5212d2c45b5d4d9a55589c45bc299a032c117a4b827fb90b4 WatchSource:0}: Error finding container eb84d9c8062a89a5212d2c45b5d4d9a55589c45bc299a032c117a4b827fb90b4: Status 404 returned error can't find the container with id eb84d9c8062a89a5212d2c45b5d4d9a55589c45bc299a032c117a4b827fb90b4
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.963816 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" event={"ID":"9f8dfcbc-aac4-430a-b0fb-fb57092bf898","Type":"ContainerStarted","Data":"2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e"}
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.964115 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" podUID="9f8dfcbc-aac4-430a-b0fb-fb57092bf898" containerName="dnsmasq-dns" containerID="cri-o://2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e" gracePeriod=10
Jan 29 08:03:24 crc kubenswrapper[4861]: I0129 08:03:24.964463 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn"
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:24.999269 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" podStartSLOduration=2.999143148 podStartE2EDuration="2.999143148s" podCreationTimestamp="2026-01-29 08:03:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:24.988192851 +0000 UTC m=+5296.659687458" watchObservedRunningTime="2026-01-29 08:03:24.999143148 +0000 UTC m=+5296.670637725"
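The pod_startup_latency_tracker line above is plain arithmetic: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and the zero-valued pull timestamps ("0001-01-01 ...") mean no image pull contributed to the SLO window. A small sketch reproducing the calculation (the layout string is an assumption matching the log's timestamp format):

    // startup_slo.go - sketch: recompute podStartE2EDuration from the
    // timestamps in the latency-tracker line above.
    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "2006-01-02 15:04:05 -0700 MST" // Go accepts fractional seconds on parse
        created, err := time.Parse(layout, "2026-01-29 08:03:22 +0000 UTC")
        if err != nil {
            panic(err)
        }
        running, err := time.Parse(layout, "2026-01-29 08:03:24.999143148 +0000 UTC")
        if err != nil {
            panic(err)
        }
        fmt.Println(running.Sub(created)) // 2.999143148s, matching the logged value
    }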
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.413211 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn"
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.432783 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fm2f\" (UniqueName: \"kubernetes.io/projected/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-kube-api-access-9fm2f\") pod \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") "
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.432912 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-dns-svc\") pod \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") "
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.432965 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-ovsdbserver-nb\") pod \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") "
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.433005 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-config\") pod \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\" (UID: \"9f8dfcbc-aac4-430a-b0fb-fb57092bf898\") "
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.441870 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-kube-api-access-9fm2f" (OuterVolumeSpecName: "kube-api-access-9fm2f") pod "9f8dfcbc-aac4-430a-b0fb-fb57092bf898" (UID: "9f8dfcbc-aac4-430a-b0fb-fb57092bf898"). InnerVolumeSpecName "kube-api-access-9fm2f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.488906 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9f8dfcbc-aac4-430a-b0fb-fb57092bf898" (UID: "9f8dfcbc-aac4-430a-b0fb-fb57092bf898"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.489769 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9f8dfcbc-aac4-430a-b0fb-fb57092bf898" (UID: "9f8dfcbc-aac4-430a-b0fb-fb57092bf898"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.511591 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-config" (OuterVolumeSpecName: "config") pod "9f8dfcbc-aac4-430a-b0fb-fb57092bf898" (UID: "9f8dfcbc-aac4-430a-b0fb-fb57092bf898"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.534524 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.534553 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-config\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.534563 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fm2f\" (UniqueName: \"kubernetes.io/projected/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-kube-api-access-9fm2f\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.534574 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9f8dfcbc-aac4-430a-b0fb-fb57092bf898-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.638697 4861 scope.go:117] "RemoveContainer" containerID="2befb88b6b29953445d205188809a3dc31da1a81f34f879665dc3c3ebde1246e"
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.975358 4861 generic.go:334] "Generic (PLEG): container finished" podID="9f8dfcbc-aac4-430a-b0fb-fb57092bf898" containerID="2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e" exitCode=0
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.975403 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn"
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.975420 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" event={"ID":"9f8dfcbc-aac4-430a-b0fb-fb57092bf898","Type":"ContainerDied","Data":"2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e"}
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.975890 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dc855b79-rd8jn" event={"ID":"9f8dfcbc-aac4-430a-b0fb-fb57092bf898","Type":"ContainerDied","Data":"bae27f1606809e94311a2ba38a262210283f32d8515b4aa74391a5bff4be28fa"}
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.975912 4861 scope.go:117] "RemoveContainer" containerID="2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e"
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.979973 4861 generic.go:334] "Generic (PLEG): container finished" podID="b928b334-0553-43a5-9572-f900b2e0fafd" containerID="c102c68d6af69ced1e949ac7020c9579018acf179075177009c8264e6662e1eb" exitCode=0
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.980004 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp" event={"ID":"b928b334-0553-43a5-9572-f900b2e0fafd","Type":"ContainerDied","Data":"c102c68d6af69ced1e949ac7020c9579018acf179075177009c8264e6662e1eb"}
Jan 29 08:03:25 crc kubenswrapper[4861]: I0129 08:03:25.980019 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp" event={"ID":"b928b334-0553-43a5-9572-f900b2e0fafd","Type":"ContainerStarted","Data":"eb84d9c8062a89a5212d2c45b5d4d9a55589c45bc299a032c117a4b827fb90b4"}
containerID="9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861" Jan 29 08:03:26 crc kubenswrapper[4861]: I0129 08:03:26.029499 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dc855b79-rd8jn"] Jan 29 08:03:26 crc kubenswrapper[4861]: I0129 08:03:26.037260 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6dc855b79-rd8jn"] Jan 29 08:03:26 crc kubenswrapper[4861]: I0129 08:03:26.106982 4861 scope.go:117] "RemoveContainer" containerID="2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e" Jan 29 08:03:26 crc kubenswrapper[4861]: E0129 08:03:26.107488 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e\": container with ID starting with 2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e not found: ID does not exist" containerID="2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e" Jan 29 08:03:26 crc kubenswrapper[4861]: I0129 08:03:26.107532 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e"} err="failed to get container status \"2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e\": rpc error: code = NotFound desc = could not find container \"2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e\": container with ID starting with 2ce1f6e40444d38c84e490af1cf406e5203a32c2b08b8ddf413d6fff185cbf8e not found: ID does not exist" Jan 29 08:03:26 crc kubenswrapper[4861]: I0129 08:03:26.107559 4861 scope.go:117] "RemoveContainer" containerID="9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861" Jan 29 08:03:26 crc kubenswrapper[4861]: E0129 08:03:26.108058 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861\": container with ID starting with 9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861 not found: ID does not exist" containerID="9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861" Jan 29 08:03:26 crc kubenswrapper[4861]: I0129 08:03:26.108150 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861"} err="failed to get container status \"9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861\": rpc error: code = NotFound desc = could not find container \"9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861\": container with ID starting with 9526ae75cbb73ce598a12b2419b4e1936606906c6bdbe67828851ee5e183b861 not found: ID does not exist" Jan 29 08:03:26 crc kubenswrapper[4861]: I0129 08:03:26.998634 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp" event={"ID":"b928b334-0553-43a5-9572-f900b2e0fafd","Type":"ContainerStarted","Data":"7e6e918a09746400101728071b647975eda426e6c7b4f8c2ce273ca5a436ae1f"} Jan 29 08:03:27 crc kubenswrapper[4861]: I0129 08:03:26.999561 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp" Jan 29 08:03:27 crc kubenswrapper[4861]: I0129 08:03:27.033226 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp" 
Jan 29 08:03:27 crc kubenswrapper[4861]: I0129 08:03:27.033226 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp" podStartSLOduration=3.033205215 podStartE2EDuration="3.033205215s" podCreationTimestamp="2026-01-29 08:03:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:27.03149081 +0000 UTC m=+5298.702985447" watchObservedRunningTime="2026-01-29 08:03:27.033205215 +0000 UTC m=+5298.704699782"
Jan 29 08:03:27 crc kubenswrapper[4861]: I0129 08:03:27.130711 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f8dfcbc-aac4-430a-b0fb-fb57092bf898" path="/var/lib/kubelet/pods/9f8dfcbc-aac4-430a-b0fb-fb57092bf898/volumes"
Jan 29 08:03:27 crc kubenswrapper[4861]: I0129 08:03:27.872746 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2"
Jan 29 08:03:28 crc kubenswrapper[4861]: I0129 08:03:28.116913 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e"
Jan 29 08:03:28 crc kubenswrapper[4861]: E0129 08:03:28.117185 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.037909 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-copy-data"]
Jan 29 08:03:30 crc kubenswrapper[4861]: E0129 08:03:30.042835 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f8dfcbc-aac4-430a-b0fb-fb57092bf898" containerName="init"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.042880 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f8dfcbc-aac4-430a-b0fb-fb57092bf898" containerName="init"
Jan 29 08:03:30 crc kubenswrapper[4861]: E0129 08:03:30.042951 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f8dfcbc-aac4-430a-b0fb-fb57092bf898" containerName="dnsmasq-dns"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.042960 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f8dfcbc-aac4-430a-b0fb-fb57092bf898" containerName="dnsmasq-dns"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.043336 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f8dfcbc-aac4-430a-b0fb-fb57092bf898" containerName="dnsmasq-dns"
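The "back-off 5m0s" in the CrashLoopBackOff error above is the ceiling of kubelet's container restart backoff, which roughly doubles from a small initial delay up to a five-minute cap. The 10s base and doubling below are stated as an approximation of kubelet's defaults, not something read from this log:

    // backoff_sketch.go - sketch: the shape of a doubling restart backoff
    // with a 5m cap, approximating the behavior behind CrashLoopBackOff.
    package main

    import (
        "fmt"
        "time"
    )

    func backoff(restarts int) time.Duration {
        d := 10 * time.Second // assumed initial delay
        for i := 0; i < restarts; i++ {
            d *= 2
            if d >= 5*time.Minute {
                return 5 * time.Minute // the "back-off 5m0s" ceiling seen above
            }
        }
        return d
    }

    func main() {
        for r := 0; r <= 6; r++ {
            fmt.Printf("restart %d -> wait %v\n", r, backoff(r))
        }
    }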
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.044047 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.046761 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.048553 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"]
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.222916 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/46b0bd27-5d0b-41ee-87ec-37bc3bd687d4-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4\") " pod="openstack/ovn-copy-data"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.223020 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5b820974-15cc-48f4-81ee-524bce87359a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5b820974-15cc-48f4-81ee-524bce87359a\") pod \"ovn-copy-data\" (UID: \"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4\") " pod="openstack/ovn-copy-data"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.223482 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7mhv\" (UniqueName: \"kubernetes.io/projected/46b0bd27-5d0b-41ee-87ec-37bc3bd687d4-kube-api-access-q7mhv\") pod \"ovn-copy-data\" (UID: \"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4\") " pod="openstack/ovn-copy-data"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.325787 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5b820974-15cc-48f4-81ee-524bce87359a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5b820974-15cc-48f4-81ee-524bce87359a\") pod \"ovn-copy-data\" (UID: \"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4\") " pod="openstack/ovn-copy-data"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.325980 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7mhv\" (UniqueName: \"kubernetes.io/projected/46b0bd27-5d0b-41ee-87ec-37bc3bd687d4-kube-api-access-q7mhv\") pod \"ovn-copy-data\" (UID: \"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4\") " pod="openstack/ovn-copy-data"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.326328 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/46b0bd27-5d0b-41ee-87ec-37bc3bd687d4-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4\") " pod="openstack/ovn-copy-data"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.333587 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.333684 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5b820974-15cc-48f4-81ee-524bce87359a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5b820974-15cc-48f4-81ee-524bce87359a\") pod \"ovn-copy-data\" (UID: \"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ab2c561e4cee51d559d8a1e20ea86ece7d23dfd32713ca80b88438304bd57543/globalmount\"" pod="openstack/ovn-copy-data"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.341124 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/46b0bd27-5d0b-41ee-87ec-37bc3bd687d4-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4\") " pod="openstack/ovn-copy-data"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.359609 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7mhv\" (UniqueName: \"kubernetes.io/projected/46b0bd27-5d0b-41ee-87ec-37bc3bd687d4-kube-api-access-q7mhv\") pod \"ovn-copy-data\" (UID: \"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4\") " pod="openstack/ovn-copy-data"
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.387213 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5b820974-15cc-48f4-81ee-524bce87359a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5b820974-15cc-48f4-81ee-524bce87359a\") pod \"ovn-copy-data\" (UID: \"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4\") " pod="openstack/ovn-copy-data"
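The csi_attacher line above shows the staging decision: if the CSI node plugin does not advertise STAGE_UNSTAGE_VOLUME, MountDevice (NodeStageVolume) is skipped and the volume goes straight to publish. A sketch of that decision with local stand-in types (not the CSI spec's generated Go bindings):

    // csi_stage_sketch.go - sketch: skip NodeStage when the plugin lacks
    // the STAGE_UNSTAGE_VOLUME node capability, as logged above.
    package main

    import "fmt"

    type nodeCapability string

    const stageUnstage nodeCapability = "STAGE_UNSTAGE_VOLUME"

    func mountDevice(caps []nodeCapability, volumeID string) {
        for _, c := range caps {
            if c == stageUnstage {
                fmt.Println("NodeStageVolume:", volumeID)
                return
            }
        }
        fmt.Println("STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")
    }

    func main() {
        // Per the log, kubevirt.io.hostpath-provisioner reports no staging support.
        mountDevice(nil, "pvc-5b820974-15cc-48f4-81ee-524bce87359a")
    }

MountDevice is still reported as "succeeded" afterwards because the no-op counts as completing the device-mount step.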
Jan 29 08:03:30 crc kubenswrapper[4861]: I0129 08:03:30.677666 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data"
Jan 29 08:03:31 crc kubenswrapper[4861]: I0129 08:03:31.299477 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"]
Jan 29 08:03:32 crc kubenswrapper[4861]: I0129 08:03:32.053525 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4","Type":"ContainerStarted","Data":"8ee99db89fa8c68325467a6e9516a9dca294b27d9ff1b3775bb1f895f86f2ca3"}
Jan 29 08:03:32 crc kubenswrapper[4861]: I0129 08:03:32.053898 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"46b0bd27-5d0b-41ee-87ec-37bc3bd687d4","Type":"ContainerStarted","Data":"a79a0576c70e7e083f472786b68d5e473765edd7720b13c60b77271d385b90c9"}
Jan 29 08:03:32 crc kubenswrapper[4861]: I0129 08:03:32.083107 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=2.591398814 podStartE2EDuration="3.083065682s" podCreationTimestamp="2026-01-29 08:03:29 +0000 UTC" firstStartedPulling="2026-01-29 08:03:31.303712829 +0000 UTC m=+5302.975207396" lastFinishedPulling="2026-01-29 08:03:31.795379707 +0000 UTC m=+5303.466874264" observedRunningTime="2026-01-29 08:03:32.073645305 +0000 UTC m=+5303.745139872" watchObservedRunningTime="2026-01-29 08:03:32.083065682 +0000 UTC m=+5303.754560249"
Jan 29 08:03:34 crc kubenswrapper[4861]: E0129 08:03:34.192431 4861 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.80:42562->38.102.83.80:46667: read tcp 38.102.83.80:42562->38.102.83.80:46667: read: connection reset by peer
Jan 29 08:03:34 crc kubenswrapper[4861]: E0129 08:03:34.192518 4861 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.80:42562->38.102.83.80:46667: write tcp 38.102.83.80:42562->38.102.83.80:46667: write: broken pipe
Jan 29 08:03:34 crc kubenswrapper[4861]: I0129 08:03:34.458305 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:03:34 crc kubenswrapper[4861]: I0129 08:03:34.541170 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-mdp2f"]
Jan 29 08:03:34 crc kubenswrapper[4861]: I0129 08:03:34.541508 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-699964fbc-mdp2f" podUID="b3372ba1-4e55-4072-b59b-ca130544ff26" containerName="dnsmasq-dns" containerID="cri-o://8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9" gracePeriod=10
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.011437 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.080675 4861 generic.go:334] "Generic (PLEG): container finished" podID="b3372ba1-4e55-4072-b59b-ca130544ff26" containerID="8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9" exitCode=0
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.080719 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-mdp2f" event={"ID":"b3372ba1-4e55-4072-b59b-ca130544ff26","Type":"ContainerDied","Data":"8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9"}
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.080736 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-mdp2f"
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.080756 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-mdp2f" event={"ID":"b3372ba1-4e55-4072-b59b-ca130544ff26","Type":"ContainerDied","Data":"1c0f7d7e7930935e36f67a0d26e19ed1d07d474187845ae1fe784876f3a91abc"}
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.080777 4861 scope.go:117] "RemoveContainer" containerID="8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9"
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.099897 4861 scope.go:117] "RemoveContainer" containerID="04f2ed9ed01f3dd85ccd00d380f0cbefec4481c7d3cc925a54a230612f741993"
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.115094 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6l6g\" (UniqueName: \"kubernetes.io/projected/b3372ba1-4e55-4072-b59b-ca130544ff26-kube-api-access-d6l6g\") pod \"b3372ba1-4e55-4072-b59b-ca130544ff26\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") "
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.115214 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-config\") pod \"b3372ba1-4e55-4072-b59b-ca130544ff26\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") "
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.115356 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-dns-svc\") pod \"b3372ba1-4e55-4072-b59b-ca130544ff26\" (UID: \"b3372ba1-4e55-4072-b59b-ca130544ff26\") "
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.123936 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3372ba1-4e55-4072-b59b-ca130544ff26-kube-api-access-d6l6g" (OuterVolumeSpecName: "kube-api-access-d6l6g") pod "b3372ba1-4e55-4072-b59b-ca130544ff26" (UID: "b3372ba1-4e55-4072-b59b-ca130544ff26"). InnerVolumeSpecName "kube-api-access-d6l6g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.125593 4861 scope.go:117] "RemoveContainer" containerID="8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9"
Jan 29 08:03:35 crc kubenswrapper[4861]: E0129 08:03:35.126091 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9\": container with ID starting with 8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9 not found: ID does not exist" containerID="8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9"
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.126136 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9"} err="failed to get container status \"8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9\": rpc error: code = NotFound desc = could not find container \"8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9\": container with ID starting with 8944febe92d3c6e5f492f24aaad5caa10c7f788f0c5b2f7ea3cf83f1051738e9 not found: ID does not exist"
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.126159 4861 scope.go:117] "RemoveContainer" containerID="04f2ed9ed01f3dd85ccd00d380f0cbefec4481c7d3cc925a54a230612f741993"
Jan 29 08:03:35 crc kubenswrapper[4861]: E0129 08:03:35.126954 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04f2ed9ed01f3dd85ccd00d380f0cbefec4481c7d3cc925a54a230612f741993\": container with ID starting with 04f2ed9ed01f3dd85ccd00d380f0cbefec4481c7d3cc925a54a230612f741993 not found: ID does not exist" containerID="04f2ed9ed01f3dd85ccd00d380f0cbefec4481c7d3cc925a54a230612f741993"
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.126978 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04f2ed9ed01f3dd85ccd00d380f0cbefec4481c7d3cc925a54a230612f741993"} err="failed to get container status \"04f2ed9ed01f3dd85ccd00d380f0cbefec4481c7d3cc925a54a230612f741993\": rpc error: code = NotFound desc = could not find container \"04f2ed9ed01f3dd85ccd00d380f0cbefec4481c7d3cc925a54a230612f741993\": container with ID starting with 04f2ed9ed01f3dd85ccd00d380f0cbefec4481c7d3cc925a54a230612f741993 not found: ID does not exist"
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.150848 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-config" (OuterVolumeSpecName: "config") pod "b3372ba1-4e55-4072-b59b-ca130544ff26" (UID: "b3372ba1-4e55-4072-b59b-ca130544ff26"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.167535 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b3372ba1-4e55-4072-b59b-ca130544ff26" (UID: "b3372ba1-4e55-4072-b59b-ca130544ff26"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.217114 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.217257 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6l6g\" (UniqueName: \"kubernetes.io/projected/b3372ba1-4e55-4072-b59b-ca130544ff26-kube-api-access-d6l6g\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.217598 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3372ba1-4e55-4072-b59b-ca130544ff26-config\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.428524 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-mdp2f"]
Jan 29 08:03:35 crc kubenswrapper[4861]: I0129 08:03:35.441770 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-mdp2f"]
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.761912 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Jan 29 08:03:36 crc kubenswrapper[4861]: E0129 08:03:36.762625 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3372ba1-4e55-4072-b59b-ca130544ff26" containerName="init"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.762643 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3372ba1-4e55-4072-b59b-ca130544ff26" containerName="init"
Jan 29 08:03:36 crc kubenswrapper[4861]: E0129 08:03:36.762670 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3372ba1-4e55-4072-b59b-ca130544ff26" containerName="dnsmasq-dns"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.762682 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3372ba1-4e55-4072-b59b-ca130544ff26" containerName="dnsmasq-dns"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.762868 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3372ba1-4e55-4072-b59b-ca130544ff26" containerName="dnsmasq-dns"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.763847 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.769710 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.770168 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-ktsxs"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.770481 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.770668 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.781311 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.845745 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vf9l\" (UniqueName: \"kubernetes.io/projected/d9a0af9d-96bd-42dd-9d60-0581b51b9981-kube-api-access-8vf9l\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.845810 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9a0af9d-96bd-42dd-9d60-0581b51b9981-scripts\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.845853 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a0af9d-96bd-42dd-9d60-0581b51b9981-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.845878 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d9a0af9d-96bd-42dd-9d60-0581b51b9981-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.845900 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9a0af9d-96bd-42dd-9d60-0581b51b9981-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.845979 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a0af9d-96bd-42dd-9d60-0581b51b9981-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.846063 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9a0af9d-96bd-42dd-9d60-0581b51b9981-config\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.948459 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a0af9d-96bd-42dd-9d60-0581b51b9981-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.948520 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9a0af9d-96bd-42dd-9d60-0581b51b9981-config\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.948572 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vf9l\" (UniqueName: \"kubernetes.io/projected/d9a0af9d-96bd-42dd-9d60-0581b51b9981-kube-api-access-8vf9l\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.948644 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9a0af9d-96bd-42dd-9d60-0581b51b9981-scripts\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.948687 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a0af9d-96bd-42dd-9d60-0581b51b9981-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.948740 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d9a0af9d-96bd-42dd-9d60-0581b51b9981-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.948771 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9a0af9d-96bd-42dd-9d60-0581b51b9981-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.949527 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9a0af9d-96bd-42dd-9d60-0581b51b9981-config\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.949823 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d9a0af9d-96bd-42dd-9d60-0581b51b9981-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.950505 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9a0af9d-96bd-42dd-9d60-0581b51b9981-scripts\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.953583 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a0af9d-96bd-42dd-9d60-0581b51b9981-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.954278 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a0af9d-96bd-42dd-9d60-0581b51b9981-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.956098 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9a0af9d-96bd-42dd-9d60-0581b51b9981-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:36 crc kubenswrapper[4861]: I0129 08:03:36.964356 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vf9l\" (UniqueName: \"kubernetes.io/projected/d9a0af9d-96bd-42dd-9d60-0581b51b9981-kube-api-access-8vf9l\") pod \"ovn-northd-0\" (UID: \"d9a0af9d-96bd-42dd-9d60-0581b51b9981\") " pod="openstack/ovn-northd-0"
Jan 29 08:03:37 crc kubenswrapper[4861]: I0129 08:03:37.124336 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3372ba1-4e55-4072-b59b-ca130544ff26" path="/var/lib/kubelet/pods/b3372ba1-4e55-4072-b59b-ca130544ff26/volumes"
Jan 29 08:03:37 crc kubenswrapper[4861]: I0129 08:03:37.127373 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 29 08:03:37 crc kubenswrapper[4861]: I0129 08:03:37.615501 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Jan 29 08:03:38 crc kubenswrapper[4861]: I0129 08:03:38.108859 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d9a0af9d-96bd-42dd-9d60-0581b51b9981","Type":"ContainerStarted","Data":"a7bf9f304644d491b68a733ac94c81e53382e39f00419be7aaec8fedd8da94a5"}
Jan 29 08:03:38 crc kubenswrapper[4861]: I0129 08:03:38.109228 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Jan 29 08:03:38 crc kubenswrapper[4861]: I0129 08:03:38.109244 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d9a0af9d-96bd-42dd-9d60-0581b51b9981","Type":"ContainerStarted","Data":"e072cbfe9857925e0914d60a7b7c6c9c90f62fa3c3940866f2f0c3b39b3dcf65"}
Jan 29 08:03:38 crc kubenswrapper[4861]: I0129 08:03:38.109256 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d9a0af9d-96bd-42dd-9d60-0581b51b9981","Type":"ContainerStarted","Data":"1bce61edc2f08fde7a60d98d0f624439cb532400cb754cbff3cb5ebce180f7df"}
Jan 29 08:03:38 crc kubenswrapper[4861]: I0129 08:03:38.137044 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.137026758 podStartE2EDuration="2.137026758s" podCreationTimestamp="2026-01-29 08:03:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:38.129531272 +0000 UTC m=+5309.801025849" watchObservedRunningTime="2026-01-29 08:03:38.137026758 +0000 UTC m=+5309.808521315"
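The ovn-northd-0 volume list above (configmaps, secrets, an emptyDir, plus a projected service-account token) corresponds to pod-spec volumes of the following shape, written against the real k8s.io/api/core/v1 types; the volume names are taken from the log, everything else is illustrative:

    // volumes_sketch.go - sketch: pod-spec volumes matching the kinds the
    // kubelet mounted for ovn-northd-0 above (token volume omitted for brevity).
    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        vols := []corev1.Volume{
            {Name: "config", VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{
                LocalObjectReference: corev1.LocalObjectReference{Name: "ovnnorthd-config"}}}},
            {Name: "scripts", VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{
                LocalObjectReference: corev1.LocalObjectReference{Name: "ovnnorthd-scripts"}}}},
            {Name: "ovn-northd-tls-certs", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{
                SecretName: "cert-ovnnorthd-ovndbs"}}},
            {Name: "ovn-rundir", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
        }
        for _, v := range vols {
            fmt.Println(v.Name)
        }
    }

The reflector lines just before the mounts show the kubelet populating its configmap and secret caches for exactly these sources before SetUp runs.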
29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.116332 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.503092 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-7pslv"]
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.504675 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-7pslv"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.508741 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bf8c-account-create-update-h5fb5"]
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.509711 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bf8c-account-create-update-h5fb5"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.511578 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.520854 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-7pslv"]
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.537051 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bf8c-account-create-update-h5fb5"]
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.643623 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d06c6849-8f0a-4626-9ae3-1922b1196771-operator-scripts\") pod \"keystone-db-create-7pslv\" (UID: \"d06c6849-8f0a-4626-9ae3-1922b1196771\") " pod="openstack/keystone-db-create-7pslv"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.643676 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6g4n\" (UniqueName: \"kubernetes.io/projected/d06c6849-8f0a-4626-9ae3-1922b1196771-kube-api-access-p6g4n\") pod \"keystone-db-create-7pslv\" (UID: \"d06c6849-8f0a-4626-9ae3-1922b1196771\") " pod="openstack/keystone-db-create-7pslv"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.643775 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af4e8282-a6ea-40e8-bdb6-72115771f88e-operator-scripts\") pod \"keystone-bf8c-account-create-update-h5fb5\" (UID: \"af4e8282-a6ea-40e8-bdb6-72115771f88e\") " pod="openstack/keystone-bf8c-account-create-update-h5fb5"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.643809 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7svzr\" (UniqueName: \"kubernetes.io/projected/af4e8282-a6ea-40e8-bdb6-72115771f88e-kube-api-access-7svzr\") pod \"keystone-bf8c-account-create-update-h5fb5\" (UID: \"af4e8282-a6ea-40e8-bdb6-72115771f88e\") " pod="openstack/keystone-bf8c-account-create-update-h5fb5"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.745759 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af4e8282-a6ea-40e8-bdb6-72115771f88e-operator-scripts\") pod \"keystone-bf8c-account-create-update-h5fb5\" (UID: \"af4e8282-a6ea-40e8-bdb6-72115771f88e\") " pod="openstack/keystone-bf8c-account-create-update-h5fb5"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.745812 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7svzr\" (UniqueName: \"kubernetes.io/projected/af4e8282-a6ea-40e8-bdb6-72115771f88e-kube-api-access-7svzr\") pod \"keystone-bf8c-account-create-update-h5fb5\" (UID: \"af4e8282-a6ea-40e8-bdb6-72115771f88e\") " pod="openstack/keystone-bf8c-account-create-update-h5fb5"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.745855 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d06c6849-8f0a-4626-9ae3-1922b1196771-operator-scripts\") pod \"keystone-db-create-7pslv\" (UID: \"d06c6849-8f0a-4626-9ae3-1922b1196771\") " pod="openstack/keystone-db-create-7pslv"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.745892 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6g4n\" (UniqueName: \"kubernetes.io/projected/d06c6849-8f0a-4626-9ae3-1922b1196771-kube-api-access-p6g4n\") pod \"keystone-db-create-7pslv\" (UID: \"d06c6849-8f0a-4626-9ae3-1922b1196771\") " pod="openstack/keystone-db-create-7pslv"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.746542 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af4e8282-a6ea-40e8-bdb6-72115771f88e-operator-scripts\") pod \"keystone-bf8c-account-create-update-h5fb5\" (UID: \"af4e8282-a6ea-40e8-bdb6-72115771f88e\") " pod="openstack/keystone-bf8c-account-create-update-h5fb5"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.747087 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d06c6849-8f0a-4626-9ae3-1922b1196771-operator-scripts\") pod \"keystone-db-create-7pslv\" (UID: \"d06c6849-8f0a-4626-9ae3-1922b1196771\") " pod="openstack/keystone-db-create-7pslv"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.765044 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6g4n\" (UniqueName: \"kubernetes.io/projected/d06c6849-8f0a-4626-9ae3-1922b1196771-kube-api-access-p6g4n\") pod \"keystone-db-create-7pslv\" (UID: \"d06c6849-8f0a-4626-9ae3-1922b1196771\") " pod="openstack/keystone-db-create-7pslv"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.773907 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7svzr\" (UniqueName: \"kubernetes.io/projected/af4e8282-a6ea-40e8-bdb6-72115771f88e-kube-api-access-7svzr\") pod \"keystone-bf8c-account-create-update-h5fb5\" (UID: \"af4e8282-a6ea-40e8-bdb6-72115771f88e\") " pod="openstack/keystone-bf8c-account-create-update-h5fb5"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.832464 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-7pslv"
Jan 29 08:03:41 crc kubenswrapper[4861]: I0129 08:03:41.839945 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bf8c-account-create-update-h5fb5"
Jan 29 08:03:42 crc kubenswrapper[4861]: I0129 08:03:42.148403 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"f12c7cd3a0871c191ad7ec4bd142001b746849f696737470eed8fe923ec11fff"}
Jan 29 08:03:42 crc kubenswrapper[4861]: I0129 08:03:42.316355 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-7pslv"]
Jan 29 08:03:42 crc kubenswrapper[4861]: I0129 08:03:42.379407 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bf8c-account-create-update-h5fb5"]
Jan 29 08:03:43 crc kubenswrapper[4861]: I0129 08:03:43.158399 4861 generic.go:334] "Generic (PLEG): container finished" podID="d06c6849-8f0a-4626-9ae3-1922b1196771" containerID="ca66f7168cdb049ceb8f4ca4419850e3f8f81328c1406ee864e6eee211a0ab05" exitCode=0
Jan 29 08:03:43 crc kubenswrapper[4861]: I0129 08:03:43.158488 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-7pslv" event={"ID":"d06c6849-8f0a-4626-9ae3-1922b1196771","Type":"ContainerDied","Data":"ca66f7168cdb049ceb8f4ca4419850e3f8f81328c1406ee864e6eee211a0ab05"}
Jan 29 08:03:43 crc kubenswrapper[4861]: I0129 08:03:43.159053 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-7pslv" event={"ID":"d06c6849-8f0a-4626-9ae3-1922b1196771","Type":"ContainerStarted","Data":"7e1272d2c77bf92f91cfc71b8fc84f367dd69597a4070f5d6f94e0571c4e7b8a"}
Jan 29 08:03:43 crc kubenswrapper[4861]: I0129 08:03:43.161341 4861 generic.go:334] "Generic (PLEG): container finished" podID="af4e8282-a6ea-40e8-bdb6-72115771f88e" containerID="058a0f70dd93d722bfef6f49e08a2bc112d16bde8851b7461552897dc4e195fa" exitCode=0
Jan 29 08:03:43 crc kubenswrapper[4861]: I0129 08:03:43.161393 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bf8c-account-create-update-h5fb5" event={"ID":"af4e8282-a6ea-40e8-bdb6-72115771f88e","Type":"ContainerDied","Data":"058a0f70dd93d722bfef6f49e08a2bc112d16bde8851b7461552897dc4e195fa"}
Jan 29 08:03:43 crc kubenswrapper[4861]: I0129 08:03:43.161420 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bf8c-account-create-update-h5fb5" event={"ID":"af4e8282-a6ea-40e8-bdb6-72115771f88e","Type":"ContainerStarted","Data":"4da59299cc20823c46d73323176b01080712962e0836ef3ea42031827ce28e7c"}
Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.574624 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bf8c-account-create-update-h5fb5"
Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.581245 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-7pslv"
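The entries above trace one complete create-and-run cycle for the two keystone jobs: a "SyncLoop ADD" from the API, the volume reconciler's VerifyControllerAttachedVolume and MountVolume.SetUp steps, then PLEG ContainerStarted/ContainerDied events as each job container exits with exitCode=0. A minimal sketch for pulling those PLEG lifecycle events out of a log like this one, assuming only the klog line format visible above, one entry per line (the script name and regex are illustrative, not part of the log):

    #!/usr/bin/env python3
    # pleg_events.py -- illustrative: list PLEG container events from a kubelet log.
    import re
    import sys

    # Matches e.g.: "SyncLoop (PLEG): event for pod" pod="openstack/..."
    #               event={"ID":"...","Type":"ContainerDied","Data":"..."}
    PLEG = re.compile(
        r'"SyncLoop \(PLEG\): event for pod" pod="(?P<pod>[^"]+)" '
        r'event={"ID":"(?P<uid>[^"]+)","Type":"(?P<type>[^"]+)","Data":"(?P<data>[^"]+)"}'
    )

    for line in sys.stdin:
        m = PLEG.search(line)
        if m:
            print(f'{m["pod"]}\t{m["type"]}\t{m["data"][:12]}')

Fed this section (e.g. zcat kubelet.log.gz | python3 pleg_events.py), it would print one pod/event-type/container-id row per PLEG entry, which makes the Died-then-Started pairs for short-lived job containers easy to scan.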
Need to start a new one" pod="openstack/keystone-db-create-7pslv" Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.702809 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6g4n\" (UniqueName: \"kubernetes.io/projected/d06c6849-8f0a-4626-9ae3-1922b1196771-kube-api-access-p6g4n\") pod \"d06c6849-8f0a-4626-9ae3-1922b1196771\" (UID: \"d06c6849-8f0a-4626-9ae3-1922b1196771\") " Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.702850 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af4e8282-a6ea-40e8-bdb6-72115771f88e-operator-scripts\") pod \"af4e8282-a6ea-40e8-bdb6-72115771f88e\" (UID: \"af4e8282-a6ea-40e8-bdb6-72115771f88e\") " Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.702983 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7svzr\" (UniqueName: \"kubernetes.io/projected/af4e8282-a6ea-40e8-bdb6-72115771f88e-kube-api-access-7svzr\") pod \"af4e8282-a6ea-40e8-bdb6-72115771f88e\" (UID: \"af4e8282-a6ea-40e8-bdb6-72115771f88e\") " Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.703009 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d06c6849-8f0a-4626-9ae3-1922b1196771-operator-scripts\") pod \"d06c6849-8f0a-4626-9ae3-1922b1196771\" (UID: \"d06c6849-8f0a-4626-9ae3-1922b1196771\") " Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.703775 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d06c6849-8f0a-4626-9ae3-1922b1196771-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d06c6849-8f0a-4626-9ae3-1922b1196771" (UID: "d06c6849-8f0a-4626-9ae3-1922b1196771"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.703779 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af4e8282-a6ea-40e8-bdb6-72115771f88e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "af4e8282-a6ea-40e8-bdb6-72115771f88e" (UID: "af4e8282-a6ea-40e8-bdb6-72115771f88e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.704021 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af4e8282-a6ea-40e8-bdb6-72115771f88e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.704041 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d06c6849-8f0a-4626-9ae3-1922b1196771-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.708223 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af4e8282-a6ea-40e8-bdb6-72115771f88e-kube-api-access-7svzr" (OuterVolumeSpecName: "kube-api-access-7svzr") pod "af4e8282-a6ea-40e8-bdb6-72115771f88e" (UID: "af4e8282-a6ea-40e8-bdb6-72115771f88e"). InnerVolumeSpecName "kube-api-access-7svzr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.708350 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d06c6849-8f0a-4626-9ae3-1922b1196771-kube-api-access-p6g4n" (OuterVolumeSpecName: "kube-api-access-p6g4n") pod "d06c6849-8f0a-4626-9ae3-1922b1196771" (UID: "d06c6849-8f0a-4626-9ae3-1922b1196771"). InnerVolumeSpecName "kube-api-access-p6g4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.806236 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7svzr\" (UniqueName: \"kubernetes.io/projected/af4e8282-a6ea-40e8-bdb6-72115771f88e-kube-api-access-7svzr\") on node \"crc\" DevicePath \"\"" Jan 29 08:03:44 crc kubenswrapper[4861]: I0129 08:03:44.806301 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6g4n\" (UniqueName: \"kubernetes.io/projected/d06c6849-8f0a-4626-9ae3-1922b1196771-kube-api-access-p6g4n\") on node \"crc\" DevicePath \"\"" Jan 29 08:03:45 crc kubenswrapper[4861]: I0129 08:03:45.187450 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bf8c-account-create-update-h5fb5" event={"ID":"af4e8282-a6ea-40e8-bdb6-72115771f88e","Type":"ContainerDied","Data":"4da59299cc20823c46d73323176b01080712962e0836ef3ea42031827ce28e7c"} Jan 29 08:03:45 crc kubenswrapper[4861]: I0129 08:03:45.187495 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4da59299cc20823c46d73323176b01080712962e0836ef3ea42031827ce28e7c" Jan 29 08:03:45 crc kubenswrapper[4861]: I0129 08:03:45.187534 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bf8c-account-create-update-h5fb5" Jan 29 08:03:45 crc kubenswrapper[4861]: I0129 08:03:45.189098 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-7pslv" event={"ID":"d06c6849-8f0a-4626-9ae3-1922b1196771","Type":"ContainerDied","Data":"7e1272d2c77bf92f91cfc71b8fc84f367dd69597a4070f5d6f94e0571c4e7b8a"} Jan 29 08:03:45 crc kubenswrapper[4861]: I0129 08:03:45.189146 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e1272d2c77bf92f91cfc71b8fc84f367dd69597a4070f5d6f94e0571c4e7b8a" Jan 29 08:03:45 crc kubenswrapper[4861]: I0129 08:03:45.189219 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-7pslv" Jan 29 08:03:46 crc kubenswrapper[4861]: I0129 08:03:46.987584 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-fb5p5"] Jan 29 08:03:46 crc kubenswrapper[4861]: E0129 08:03:46.988667 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d06c6849-8f0a-4626-9ae3-1922b1196771" containerName="mariadb-database-create" Jan 29 08:03:46 crc kubenswrapper[4861]: I0129 08:03:46.988693 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d06c6849-8f0a-4626-9ae3-1922b1196771" containerName="mariadb-database-create" Jan 29 08:03:46 crc kubenswrapper[4861]: E0129 08:03:46.988739 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af4e8282-a6ea-40e8-bdb6-72115771f88e" containerName="mariadb-account-create-update" Jan 29 08:03:46 crc kubenswrapper[4861]: I0129 08:03:46.988758 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="af4e8282-a6ea-40e8-bdb6-72115771f88e" containerName="mariadb-account-create-update" Jan 29 08:03:46 crc kubenswrapper[4861]: I0129 08:03:46.989055 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d06c6849-8f0a-4626-9ae3-1922b1196771" containerName="mariadb-database-create" Jan 29 08:03:46 crc kubenswrapper[4861]: I0129 08:03:46.989176 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="af4e8282-a6ea-40e8-bdb6-72115771f88e" containerName="mariadb-account-create-update" Jan 29 08:03:46 crc kubenswrapper[4861]: I0129 08:03:46.990131 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:46 crc kubenswrapper[4861]: I0129 08:03:46.992343 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kd89b" Jan 29 08:03:46 crc kubenswrapper[4861]: I0129 08:03:46.993795 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 08:03:46 crc kubenswrapper[4861]: I0129 08:03:46.993946 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 08:03:46 crc kubenswrapper[4861]: I0129 08:03:46.994049 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.002558 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-fb5p5"] Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.157170 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-combined-ca-bundle\") pod \"keystone-db-sync-fb5p5\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.157241 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-config-data\") pod \"keystone-db-sync-fb5p5\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.157433 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zn4j\" (UniqueName: 
\"kubernetes.io/projected/ed95aa8e-dd5c-4fb8-b415-03b895499221-kube-api-access-9zn4j\") pod \"keystone-db-sync-fb5p5\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.218444 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.258679 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-config-data\") pod \"keystone-db-sync-fb5p5\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.258733 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zn4j\" (UniqueName: \"kubernetes.io/projected/ed95aa8e-dd5c-4fb8-b415-03b895499221-kube-api-access-9zn4j\") pod \"keystone-db-sync-fb5p5\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.258861 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-combined-ca-bundle\") pod \"keystone-db-sync-fb5p5\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.265468 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-config-data\") pod \"keystone-db-sync-fb5p5\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.283400 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zn4j\" (UniqueName: \"kubernetes.io/projected/ed95aa8e-dd5c-4fb8-b415-03b895499221-kube-api-access-9zn4j\") pod \"keystone-db-sync-fb5p5\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.284316 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-combined-ca-bundle\") pod \"keystone-db-sync-fb5p5\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.334445 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:47 crc kubenswrapper[4861]: I0129 08:03:47.821544 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-fb5p5"] Jan 29 08:03:47 crc kubenswrapper[4861]: W0129 08:03:47.827685 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded95aa8e_dd5c_4fb8_b415_03b895499221.slice/crio-65446a5649c9cbd7a2f6320d05eda7e806fcb9ab6485643aa4bf75f99a6d52da WatchSource:0}: Error finding container 65446a5649c9cbd7a2f6320d05eda7e806fcb9ab6485643aa4bf75f99a6d52da: Status 404 returned error can't find the container with id 65446a5649c9cbd7a2f6320d05eda7e806fcb9ab6485643aa4bf75f99a6d52da Jan 29 08:03:48 crc kubenswrapper[4861]: I0129 08:03:48.224873 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-fb5p5" event={"ID":"ed95aa8e-dd5c-4fb8-b415-03b895499221","Type":"ContainerStarted","Data":"fb593a16c90450a1199b5f756278ac73394c2f844d143205139cd3dd8d01ed99"} Jan 29 08:03:48 crc kubenswrapper[4861]: I0129 08:03:48.224933 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-fb5p5" event={"ID":"ed95aa8e-dd5c-4fb8-b415-03b895499221","Type":"ContainerStarted","Data":"65446a5649c9cbd7a2f6320d05eda7e806fcb9ab6485643aa4bf75f99a6d52da"} Jan 29 08:03:48 crc kubenswrapper[4861]: I0129 08:03:48.246279 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-fb5p5" podStartSLOduration=2.246258513 podStartE2EDuration="2.246258513s" podCreationTimestamp="2026-01-29 08:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:48.239794544 +0000 UTC m=+5319.911289101" watchObservedRunningTime="2026-01-29 08:03:48.246258513 +0000 UTC m=+5319.917753080" Jan 29 08:03:50 crc kubenswrapper[4861]: I0129 08:03:50.244400 4861 generic.go:334] "Generic (PLEG): container finished" podID="ed95aa8e-dd5c-4fb8-b415-03b895499221" containerID="fb593a16c90450a1199b5f756278ac73394c2f844d143205139cd3dd8d01ed99" exitCode=0 Jan 29 08:03:50 crc kubenswrapper[4861]: I0129 08:03:50.244523 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-fb5p5" event={"ID":"ed95aa8e-dd5c-4fb8-b415-03b895499221","Type":"ContainerDied","Data":"fb593a16c90450a1199b5f756278ac73394c2f844d143205139cd3dd8d01ed99"} Jan 29 08:03:51 crc kubenswrapper[4861]: I0129 08:03:51.719843 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:51 crc kubenswrapper[4861]: I0129 08:03:51.843452 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-combined-ca-bundle\") pod \"ed95aa8e-dd5c-4fb8-b415-03b895499221\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " Jan 29 08:03:51 crc kubenswrapper[4861]: I0129 08:03:51.843519 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-config-data\") pod \"ed95aa8e-dd5c-4fb8-b415-03b895499221\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " Jan 29 08:03:51 crc kubenswrapper[4861]: I0129 08:03:51.843622 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zn4j\" (UniqueName: \"kubernetes.io/projected/ed95aa8e-dd5c-4fb8-b415-03b895499221-kube-api-access-9zn4j\") pod \"ed95aa8e-dd5c-4fb8-b415-03b895499221\" (UID: \"ed95aa8e-dd5c-4fb8-b415-03b895499221\") " Jan 29 08:03:51 crc kubenswrapper[4861]: I0129 08:03:51.851665 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed95aa8e-dd5c-4fb8-b415-03b895499221-kube-api-access-9zn4j" (OuterVolumeSpecName: "kube-api-access-9zn4j") pod "ed95aa8e-dd5c-4fb8-b415-03b895499221" (UID: "ed95aa8e-dd5c-4fb8-b415-03b895499221"). InnerVolumeSpecName "kube-api-access-9zn4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:03:51 crc kubenswrapper[4861]: I0129 08:03:51.873098 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed95aa8e-dd5c-4fb8-b415-03b895499221" (UID: "ed95aa8e-dd5c-4fb8-b415-03b895499221"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:03:51 crc kubenswrapper[4861]: I0129 08:03:51.906597 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-config-data" (OuterVolumeSpecName: "config-data") pod "ed95aa8e-dd5c-4fb8-b415-03b895499221" (UID: "ed95aa8e-dd5c-4fb8-b415-03b895499221"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:03:51 crc kubenswrapper[4861]: I0129 08:03:51.945742 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:03:51 crc kubenswrapper[4861]: I0129 08:03:51.945783 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed95aa8e-dd5c-4fb8-b415-03b895499221-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:03:51 crc kubenswrapper[4861]: I0129 08:03:51.945793 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zn4j\" (UniqueName: \"kubernetes.io/projected/ed95aa8e-dd5c-4fb8-b415-03b895499221-kube-api-access-9zn4j\") on node \"crc\" DevicePath \"\"" Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.265326 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-fb5p5" event={"ID":"ed95aa8e-dd5c-4fb8-b415-03b895499221","Type":"ContainerDied","Data":"65446a5649c9cbd7a2f6320d05eda7e806fcb9ab6485643aa4bf75f99a6d52da"} Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.265458 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65446a5649c9cbd7a2f6320d05eda7e806fcb9ab6485643aa4bf75f99a6d52da" Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.265531 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-fb5p5" Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.556038 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-46zkv"] Jan 29 08:03:52 crc kubenswrapper[4861]: E0129 08:03:52.556365 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed95aa8e-dd5c-4fb8-b415-03b895499221" containerName="keystone-db-sync" Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.556380 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed95aa8e-dd5c-4fb8-b415-03b895499221" containerName="keystone-db-sync" Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.556544 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed95aa8e-dd5c-4fb8-b415-03b895499221" containerName="keystone-db-sync" Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.557036 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.560309 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.560332 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.560601 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.563192 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.563616 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kd89b"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.588335 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-46zkv"]
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.632316 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c5cdbc687-h2jsh"]
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.635350 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.647389 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c5cdbc687-h2jsh"]
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.659090 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxmmw\" (UniqueName: \"kubernetes.io/projected/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-kube-api-access-bxmmw\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.659186 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-scripts\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.659219 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-fernet-keys\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.659252 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-config-data\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.659270 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-credential-keys\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.659292 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-combined-ca-bundle\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.760484 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n76rs\" (UniqueName: \"kubernetes.io/projected/013166d7-5f55-45cd-914e-f3200cd9c79a-kube-api-access-n76rs\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.760580 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-scripts\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.760614 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-fernet-keys\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.760651 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-config-data\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.760668 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-credential-keys\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.760687 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-sb\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.760711 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-combined-ca-bundle\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.760726 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-nb\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.760770 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-config\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.760789 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-dns-svc\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.760813 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxmmw\" (UniqueName: \"kubernetes.io/projected/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-kube-api-access-bxmmw\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.765269 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-fernet-keys\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.765864 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-scripts\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.768152 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-combined-ca-bundle\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.779495 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-config-data\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.780361 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-credential-keys\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.783110 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxmmw\" (UniqueName: \"kubernetes.io/projected/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-kube-api-access-bxmmw\") pod \"keystone-bootstrap-46zkv\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") " pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.862458 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-sb\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.862497 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-nb\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.862550 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-config\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.862572 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-dns-svc\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.862615 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n76rs\" (UniqueName: \"kubernetes.io/projected/013166d7-5f55-45cd-914e-f3200cd9c79a-kube-api-access-n76rs\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.863567 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-nb\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.863651 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-config\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.863748 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-dns-svc\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.864041 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-sb\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.873185 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.884910 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n76rs\" (UniqueName: \"kubernetes.io/projected/013166d7-5f55-45cd-914e-f3200cd9c79a-kube-api-access-n76rs\") pod \"dnsmasq-dns-5c5cdbc687-h2jsh\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:52 crc kubenswrapper[4861]: I0129 08:03:52.955686 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:53 crc kubenswrapper[4861]: I0129 08:03:53.386977 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-46zkv"]
Jan 29 08:03:53 crc kubenswrapper[4861]: W0129 08:03:53.392029 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb08cfb2f_5b44_47ac_b346_82d058b0ffd7.slice/crio-6cd28ed7c64657c754b439e0ebe82613e86d6e8de6e9690c100da0ce0c002c8b WatchSource:0}: Error finding container 6cd28ed7c64657c754b439e0ebe82613e86d6e8de6e9690c100da0ce0c002c8b: Status 404 returned error can't find the container with id 6cd28ed7c64657c754b439e0ebe82613e86d6e8de6e9690c100da0ce0c002c8b
Jan 29 08:03:53 crc kubenswrapper[4861]: I0129 08:03:53.474800 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c5cdbc687-h2jsh"]
Jan 29 08:03:54 crc kubenswrapper[4861]: I0129 08:03:54.282014 4861 generic.go:334] "Generic (PLEG): container finished" podID="013166d7-5f55-45cd-914e-f3200cd9c79a" containerID="ac3e438dfcb634fce04956a8111f0a0091b6508d108a2e892c54a27a3f6c4fb6" exitCode=0
Jan 29 08:03:54 crc kubenswrapper[4861]: I0129 08:03:54.282588 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh" event={"ID":"013166d7-5f55-45cd-914e-f3200cd9c79a","Type":"ContainerDied","Data":"ac3e438dfcb634fce04956a8111f0a0091b6508d108a2e892c54a27a3f6c4fb6"}
Jan 29 08:03:54 crc kubenswrapper[4861]: I0129 08:03:54.282631 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh" event={"ID":"013166d7-5f55-45cd-914e-f3200cd9c79a","Type":"ContainerStarted","Data":"258afa92900d241681f57f2b6ed275a0160751a0ea65b82487f5e31e663168ef"}
Jan 29 08:03:54 crc kubenswrapper[4861]: I0129 08:03:54.290607 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-46zkv" event={"ID":"b08cfb2f-5b44-47ac-b346-82d058b0ffd7","Type":"ContainerStarted","Data":"e09b0918194f963720a53191a04b37942582b7019c3522a516b6ab0c78cc184c"}
Jan 29 08:03:54 crc kubenswrapper[4861]: I0129 08:03:54.290678 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-46zkv" event={"ID":"b08cfb2f-5b44-47ac-b346-82d058b0ffd7","Type":"ContainerStarted","Data":"6cd28ed7c64657c754b439e0ebe82613e86d6e8de6e9690c100da0ce0c002c8b"}
Jan 29 08:03:54 crc kubenswrapper[4861]: I0129 08:03:54.338279 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-46zkv" podStartSLOduration=2.3382507759999998 podStartE2EDuration="2.338250776s" podCreationTimestamp="2026-01-29 08:03:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:54.33150542 +0000 UTC m=+5326.002999987" watchObservedRunningTime="2026-01-29 08:03:54.338250776 +0000 UTC m=+5326.009745353"
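The two manager.go:1169 warnings appear to be a benign race on the cAdvisor side: the cgroup watch event for a brand-new crio-... container arrives before the container is registered, so the lookup returns a 404, and the very next PLEG entries show the same container IDs starting normally. Tallying entries by klog severity and source location is a quick way to surface noise like this; an illustrative sketch:

    # severity_tally.py -- illustrative: count entries by severity and source file.
    import collections
    import re
    import sys

    # klog header: <I|W|E|F><MMDD> <HH:MM:SS.micros> <pid> <file:line>]
    HDR = re.compile(r'([IWEF])\d{4} \d{2}:\d{2}:\d{2}\.\d+\s+\d+\s+([\w./]+:\d+)\]')

    counts = collections.Counter()
    for line in sys.stdin:
        if (m := HDR.search(line)):
            counts[(m.group(1), m.group(2))] += 1
    for (sev, src), n in counts.most_common(10):
        print(f'{sev} {src} {n}')

On this section the W bucket holds only manager.go:1169 and the E bucket only the cpu_manager.go:410 RemoveStaleState entries, neither of which indicates a failing pod.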
Jan 29 08:03:55 crc kubenswrapper[4861]: I0129 08:03:55.305489 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh" event={"ID":"013166d7-5f55-45cd-914e-f3200cd9c79a","Type":"ContainerStarted","Data":"978ad967fa4f0bd4de7a5caa8b6028e4d4c369ab055cdbb2c3f9a9cd6048f762"}
Jan 29 08:03:55 crc kubenswrapper[4861]: I0129 08:03:55.342605 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh" podStartSLOduration=3.342560772 podStartE2EDuration="3.342560772s" podCreationTimestamp="2026-01-29 08:03:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:03:55.326761898 +0000 UTC m=+5326.998256475" watchObservedRunningTime="2026-01-29 08:03:55.342560772 +0000 UTC m=+5327.014055339"
Jan 29 08:03:56 crc kubenswrapper[4861]: I0129 08:03:56.314727 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:03:57 crc kubenswrapper[4861]: I0129 08:03:57.328546 4861 generic.go:334] "Generic (PLEG): container finished" podID="b08cfb2f-5b44-47ac-b346-82d058b0ffd7" containerID="e09b0918194f963720a53191a04b37942582b7019c3522a516b6ab0c78cc184c" exitCode=0
Jan 29 08:03:57 crc kubenswrapper[4861]: I0129 08:03:57.328674 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-46zkv" event={"ID":"b08cfb2f-5b44-47ac-b346-82d058b0ffd7","Type":"ContainerDied","Data":"e09b0918194f963720a53191a04b37942582b7019c3522a516b6ab0c78cc184c"}
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.778894 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-46zkv"
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.889480 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-credential-keys\") pod \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") "
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.889838 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-scripts\") pod \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") "
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.889905 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-combined-ca-bundle\") pod \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") "
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.889947 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-fernet-keys\") pod \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") "
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.889981 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxmmw\" (UniqueName: \"kubernetes.io/projected/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-kube-api-access-bxmmw\") pod \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") "
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.890058 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-config-data\") pod \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\" (UID: \"b08cfb2f-5b44-47ac-b346-82d058b0ffd7\") "
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.895940 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b08cfb2f-5b44-47ac-b346-82d058b0ffd7" (UID: "b08cfb2f-5b44-47ac-b346-82d058b0ffd7"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.896185 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-kube-api-access-bxmmw" (OuterVolumeSpecName: "kube-api-access-bxmmw") pod "b08cfb2f-5b44-47ac-b346-82d058b0ffd7" (UID: "b08cfb2f-5b44-47ac-b346-82d058b0ffd7"). InnerVolumeSpecName "kube-api-access-bxmmw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.896701 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b08cfb2f-5b44-47ac-b346-82d058b0ffd7" (UID: "b08cfb2f-5b44-47ac-b346-82d058b0ffd7"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.897470 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-scripts" (OuterVolumeSpecName: "scripts") pod "b08cfb2f-5b44-47ac-b346-82d058b0ffd7" (UID: "b08cfb2f-5b44-47ac-b346-82d058b0ffd7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.921249 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-config-data" (OuterVolumeSpecName: "config-data") pod "b08cfb2f-5b44-47ac-b346-82d058b0ffd7" (UID: "b08cfb2f-5b44-47ac-b346-82d058b0ffd7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.934849 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b08cfb2f-5b44-47ac-b346-82d058b0ffd7" (UID: "b08cfb2f-5b44-47ac-b346-82d058b0ffd7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.991883 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.992003 4861 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.992096 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxmmw\" (UniqueName: \"kubernetes.io/projected/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-kube-api-access-bxmmw\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.992171 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.992255 4861 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:58 crc kubenswrapper[4861]: I0129 08:03:58.992312 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b08cfb2f-5b44-47ac-b346-82d058b0ffd7-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.350231 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-46zkv" event={"ID":"b08cfb2f-5b44-47ac-b346-82d058b0ffd7","Type":"ContainerDied","Data":"6cd28ed7c64657c754b439e0ebe82613e86d6e8de6e9690c100da0ce0c002c8b"}
Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.350306 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6cd28ed7c64657c754b439e0ebe82613e86d6e8de6e9690c100da0ce0c002c8b"
Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.350324 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-46zkv"
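At this point keystone-bootstrap-46zkv is fully torn down (all six volumes detached) and the API immediately deletes and replaces it: SyncLoop DELETE, then REMOVE, then an ADD for keystone-bootstrap-l8bkz in the entries that follow. Pairing those SyncLoop verbs per pod gives a compact view of churn like this; an illustrative sketch, same assumptions as the earlier ones:

    # syncloop_churn.py -- illustrative: show ADD/DELETE/REMOVE per pod.
    import re
    import sys

    PAT = re.compile(r'"SyncLoop (ADD|DELETE|REMOVE)" source="api" pods=\["([^"]+)"\]')

    for line in sys.stdin:
        if (m := PAT.search(line)):
            print(f'{m.group(2)}: {m.group(1)}')

Run over this section it would show each keystone job ADDed once, 46zkv as the only pod DELETEd and REMOVEd before its successor's ADD, and the old dnsmasq-dns-67fbd77559-nvbhp DELETEd once its 5c5cdbc687 replacement goes ready.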
sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-46zkv" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.542679 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-46zkv"] Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.550502 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-46zkv"] Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.572362 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-l8bkz"] Jan 29 08:03:59 crc kubenswrapper[4861]: E0129 08:03:59.572681 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b08cfb2f-5b44-47ac-b346-82d058b0ffd7" containerName="keystone-bootstrap" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.572699 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b08cfb2f-5b44-47ac-b346-82d058b0ffd7" containerName="keystone-bootstrap" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.573583 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b08cfb2f-5b44-47ac-b346-82d058b0ffd7" containerName="keystone-bootstrap" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.574110 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.576792 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.576885 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.576979 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kd89b" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.577050 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.578345 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.588628 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l8bkz"] Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.601969 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-config-data\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.602022 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-combined-ca-bundle\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.602060 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqtdx\" (UniqueName: \"kubernetes.io/projected/b25499ba-e788-480f-87c8-f6e8b2178236-kube-api-access-lqtdx\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 
08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.602110 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-credential-keys\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.602156 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-scripts\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.602214 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-fernet-keys\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.703744 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-fernet-keys\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.703887 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-config-data\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.703930 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-combined-ca-bundle\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.703993 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqtdx\" (UniqueName: \"kubernetes.io/projected/b25499ba-e788-480f-87c8-f6e8b2178236-kube-api-access-lqtdx\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.704047 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-credential-keys\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.704151 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-scripts\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.710618 4861 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-combined-ca-bundle\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.710795 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-config-data\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.710863 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-scripts\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.711039 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-credential-keys\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.716611 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-fernet-keys\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.724285 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqtdx\" (UniqueName: \"kubernetes.io/projected/b25499ba-e788-480f-87c8-f6e8b2178236-kube-api-access-lqtdx\") pod \"keystone-bootstrap-l8bkz\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") " pod="openstack/keystone-bootstrap-l8bkz" Jan 29 08:03:59 crc kubenswrapper[4861]: I0129 08:03:59.889431 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l8bkz"
Jan 29 08:04:00 crc kubenswrapper[4861]: I0129 08:04:00.426574 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l8bkz"]
Jan 29 08:04:01 crc kubenswrapper[4861]: I0129 08:04:01.131012 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b08cfb2f-5b44-47ac-b346-82d058b0ffd7" path="/var/lib/kubelet/pods/b08cfb2f-5b44-47ac-b346-82d058b0ffd7/volumes"
Jan 29 08:04:01 crc kubenswrapper[4861]: I0129 08:04:01.371835 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l8bkz" event={"ID":"b25499ba-e788-480f-87c8-f6e8b2178236","Type":"ContainerStarted","Data":"f35ec5af27d2d883bceee71bf96b4729804a12e136fbc7a704c3a7825ea81508"}
Jan 29 08:04:01 crc kubenswrapper[4861]: I0129 08:04:01.371930 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l8bkz" event={"ID":"b25499ba-e788-480f-87c8-f6e8b2178236","Type":"ContainerStarted","Data":"0a1254cdb2fb27b4e88205de1c107cc382cc4fe9fc4ec540731d1bbdb4446ada"}
Jan 29 08:04:01 crc kubenswrapper[4861]: I0129 08:04:01.395193 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-l8bkz" podStartSLOduration=2.395174193 podStartE2EDuration="2.395174193s" podCreationTimestamp="2026-01-29 08:03:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:04:01.390961253 +0000 UTC m=+5333.062455810" watchObservedRunningTime="2026-01-29 08:04:01.395174193 +0000 UTC m=+5333.066668750"
Jan 29 08:04:02 crc kubenswrapper[4861]: I0129 08:04:02.958485 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh"
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.057877 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67fbd77559-nvbhp"]
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.058295 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp" podUID="b928b334-0553-43a5-9572-f900b2e0fafd" containerName="dnsmasq-dns" containerID="cri-o://7e6e918a09746400101728071b647975eda426e6c7b4f8c2ce273ca5a436ae1f" gracePeriod=10
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.404322 4861 generic.go:334] "Generic (PLEG): container finished" podID="b928b334-0553-43a5-9572-f900b2e0fafd" containerID="7e6e918a09746400101728071b647975eda426e6c7b4f8c2ce273ca5a436ae1f" exitCode=0
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.404413 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp" event={"ID":"b928b334-0553-43a5-9572-f900b2e0fafd","Type":"ContainerDied","Data":"7e6e918a09746400101728071b647975eda426e6c7b4f8c2ce273ca5a436ae1f"}
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.753983 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
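
The podStartSLOduration above is plain wall-clock arithmetic: watchObservedRunningTime (08:04:01.395174193) minus podCreationTimestamp (08:03:59) is exactly 2.395174193s, and the zero-valued pull timestamps mean no image pull contributed (the SLO duration excludes pull time when one occurs). The same subtraction as a tiny runnable Go sketch, timestamps taken from the log:

// Sketch: reproduce podStartE2EDuration from the two logged timestamps.
package main

import (
	"fmt"
	"time"
)

func main() {
	created, _ := time.Parse(time.RFC3339Nano, "2026-01-29T08:03:59Z")
	running, _ := time.Parse(time.RFC3339Nano, "2026-01-29T08:04:01.395174193Z")
	fmt.Println(running.Sub(created)) // 2.395174193s, matching the log
}
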
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.808923 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-sb\") pod \"b928b334-0553-43a5-9572-f900b2e0fafd\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") "
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.809098 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-nb\") pod \"b928b334-0553-43a5-9572-f900b2e0fafd\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") "
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.809258 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-config\") pod \"b928b334-0553-43a5-9572-f900b2e0fafd\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") "
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.809343 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qp9wc\" (UniqueName: \"kubernetes.io/projected/b928b334-0553-43a5-9572-f900b2e0fafd-kube-api-access-qp9wc\") pod \"b928b334-0553-43a5-9572-f900b2e0fafd\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") "
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.809417 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-dns-svc\") pod \"b928b334-0553-43a5-9572-f900b2e0fafd\" (UID: \"b928b334-0553-43a5-9572-f900b2e0fafd\") "
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.822118 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b928b334-0553-43a5-9572-f900b2e0fafd-kube-api-access-qp9wc" (OuterVolumeSpecName: "kube-api-access-qp9wc") pod "b928b334-0553-43a5-9572-f900b2e0fafd" (UID: "b928b334-0553-43a5-9572-f900b2e0fafd"). InnerVolumeSpecName "kube-api-access-qp9wc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.872292 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b928b334-0553-43a5-9572-f900b2e0fafd" (UID: "b928b334-0553-43a5-9572-f900b2e0fafd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.884399 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-config" (OuterVolumeSpecName: "config") pod "b928b334-0553-43a5-9572-f900b2e0fafd" (UID: "b928b334-0553-43a5-9572-f900b2e0fafd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.885419 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b928b334-0553-43a5-9572-f900b2e0fafd" (UID: "b928b334-0553-43a5-9572-f900b2e0fafd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.894223 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b928b334-0553-43a5-9572-f900b2e0fafd" (UID: "b928b334-0553-43a5-9572-f900b2e0fafd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.911535 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.911570 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-config\") on node \"crc\" DevicePath \"\""
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.911584 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qp9wc\" (UniqueName: \"kubernetes.io/projected/b928b334-0553-43a5-9572-f900b2e0fafd-kube-api-access-qp9wc\") on node \"crc\" DevicePath \"\""
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.911594 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 29 08:04:03 crc kubenswrapper[4861]: I0129 08:04:03.911602 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b928b334-0553-43a5-9572-f900b2e0fafd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 29 08:04:04 crc kubenswrapper[4861]: I0129 08:04:04.418903 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp" event={"ID":"b928b334-0553-43a5-9572-f900b2e0fafd","Type":"ContainerDied","Data":"eb84d9c8062a89a5212d2c45b5d4d9a55589c45bc299a032c117a4b827fb90b4"}
Jan 29 08:04:04 crc kubenswrapper[4861]: I0129 08:04:04.419013 4861 scope.go:117] "RemoveContainer" containerID="7e6e918a09746400101728071b647975eda426e6c7b4f8c2ce273ca5a436ae1f"
Jan 29 08:04:04 crc kubenswrapper[4861]: I0129 08:04:04.418942 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fbd77559-nvbhp"
Jan 29 08:04:04 crc kubenswrapper[4861]: I0129 08:04:04.421758 4861 generic.go:334] "Generic (PLEG): container finished" podID="b25499ba-e788-480f-87c8-f6e8b2178236" containerID="f35ec5af27d2d883bceee71bf96b4729804a12e136fbc7a704c3a7825ea81508" exitCode=0
Jan 29 08:04:04 crc kubenswrapper[4861]: I0129 08:04:04.421833 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l8bkz" event={"ID":"b25499ba-e788-480f-87c8-f6e8b2178236","Type":"ContainerDied","Data":"f35ec5af27d2d883bceee71bf96b4729804a12e136fbc7a704c3a7825ea81508"}
Jan 29 08:04:04 crc kubenswrapper[4861]: I0129 08:04:04.457653 4861 scope.go:117] "RemoveContainer" containerID="c102c68d6af69ced1e949ac7020c9579018acf179075177009c8264e6662e1eb"
Jan 29 08:04:04 crc kubenswrapper[4861]: I0129 08:04:04.496223 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67fbd77559-nvbhp"]
Jan 29 08:04:04 crc kubenswrapper[4861]: I0129 08:04:04.504353 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67fbd77559-nvbhp"]
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.132391 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b928b334-0553-43a5-9572-f900b2e0fafd" path="/var/lib/kubelet/pods/b928b334-0553-43a5-9572-f900b2e0fafd/volumes"
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.890849 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l8bkz"
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.953170 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-config-data\") pod \"b25499ba-e788-480f-87c8-f6e8b2178236\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") "
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.953242 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-fernet-keys\") pod \"b25499ba-e788-480f-87c8-f6e8b2178236\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") "
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.953369 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-combined-ca-bundle\") pod \"b25499ba-e788-480f-87c8-f6e8b2178236\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") "
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.953451 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-scripts\") pod \"b25499ba-e788-480f-87c8-f6e8b2178236\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") "
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.953499 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqtdx\" (UniqueName: \"kubernetes.io/projected/b25499ba-e788-480f-87c8-f6e8b2178236-kube-api-access-lqtdx\") pod \"b25499ba-e788-480f-87c8-f6e8b2178236\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") "
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.953585 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-credential-keys\") pod \"b25499ba-e788-480f-87c8-f6e8b2178236\" (UID: \"b25499ba-e788-480f-87c8-f6e8b2178236\") "
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.960005 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b25499ba-e788-480f-87c8-f6e8b2178236" (UID: "b25499ba-e788-480f-87c8-f6e8b2178236"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.960724 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b25499ba-e788-480f-87c8-f6e8b2178236-kube-api-access-lqtdx" (OuterVolumeSpecName: "kube-api-access-lqtdx") pod "b25499ba-e788-480f-87c8-f6e8b2178236" (UID: "b25499ba-e788-480f-87c8-f6e8b2178236"). InnerVolumeSpecName "kube-api-access-lqtdx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.963533 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b25499ba-e788-480f-87c8-f6e8b2178236" (UID: "b25499ba-e788-480f-87c8-f6e8b2178236"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.965370 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-scripts" (OuterVolumeSpecName: "scripts") pod "b25499ba-e788-480f-87c8-f6e8b2178236" (UID: "b25499ba-e788-480f-87c8-f6e8b2178236"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.987361 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-config-data" (OuterVolumeSpecName: "config-data") pod "b25499ba-e788-480f-87c8-f6e8b2178236" (UID: "b25499ba-e788-480f-87c8-f6e8b2178236"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:04:05 crc kubenswrapper[4861]: I0129 08:04:05.999290 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b25499ba-e788-480f-87c8-f6e8b2178236" (UID: "b25499ba-e788-480f-87c8-f6e8b2178236"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.055882 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.055915 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.055925 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqtdx\" (UniqueName: \"kubernetes.io/projected/b25499ba-e788-480f-87c8-f6e8b2178236-kube-api-access-lqtdx\") on node \"crc\" DevicePath \"\""
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.055938 4861 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.055946 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.055954 4861 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b25499ba-e788-480f-87c8-f6e8b2178236-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.447794 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l8bkz" event={"ID":"b25499ba-e788-480f-87c8-f6e8b2178236","Type":"ContainerDied","Data":"0a1254cdb2fb27b4e88205de1c107cc382cc4fe9fc4ec540731d1bbdb4446ada"}
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.448401 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a1254cdb2fb27b4e88205de1c107cc382cc4fe9fc4ec540731d1bbdb4446ada"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.447895 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l8bkz"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.573153 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6587f5774c-8wdxx"]
Jan 29 08:04:06 crc kubenswrapper[4861]: E0129 08:04:06.573661 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b928b334-0553-43a5-9572-f900b2e0fafd" containerName="dnsmasq-dns"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.573683 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b928b334-0553-43a5-9572-f900b2e0fafd" containerName="dnsmasq-dns"
Jan 29 08:04:06 crc kubenswrapper[4861]: E0129 08:04:06.573727 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b25499ba-e788-480f-87c8-f6e8b2178236" containerName="keystone-bootstrap"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.573736 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b25499ba-e788-480f-87c8-f6e8b2178236" containerName="keystone-bootstrap"
Jan 29 08:04:06 crc kubenswrapper[4861]: E0129 08:04:06.573754 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b928b334-0553-43a5-9572-f900b2e0fafd" containerName="init"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.573763 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b928b334-0553-43a5-9572-f900b2e0fafd" containerName="init"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.573970 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b25499ba-e788-480f-87c8-f6e8b2178236" containerName="keystone-bootstrap"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.574025 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b928b334-0553-43a5-9572-f900b2e0fafd" containerName="dnsmasq-dns"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.575122 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.578509 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kd89b"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.578744 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.578895 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.579196 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.579479 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.579741 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.597223 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6587f5774c-8wdxx"]
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.671104 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-combined-ca-bundle\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.671450 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-public-tls-certs\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.671537 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-credential-keys\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.671657 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-internal-tls-certs\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.671856 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-config-data\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.671971 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-scripts\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.672030 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cb4n\" (UniqueName: \"kubernetes.io/projected/abc77fef-ccca-488a-af89-450aa3b0836e-kube-api-access-9cb4n\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.672108 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-fernet-keys\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.774160 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-config-data\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.774264 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-scripts\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.774307 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cb4n\" (UniqueName: \"kubernetes.io/projected/abc77fef-ccca-488a-af89-450aa3b0836e-kube-api-access-9cb4n\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.774349 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-fernet-keys\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.774385 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-combined-ca-bundle\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.774418 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-public-tls-certs\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.774442 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-credential-keys\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.774504 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-internal-tls-certs\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.780663 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-public-tls-certs\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.781013 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-fernet-keys\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.781526 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-scripts\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.781622 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-combined-ca-bundle\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.782911 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-config-data\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.783452 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-credential-keys\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.784839 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/abc77fef-ccca-488a-af89-450aa3b0836e-internal-tls-certs\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.798592 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cb4n\" (UniqueName: \"kubernetes.io/projected/abc77fef-ccca-488a-af89-450aa3b0836e-kube-api-access-9cb4n\") pod \"keystone-6587f5774c-8wdxx\" (UID: \"abc77fef-ccca-488a-af89-450aa3b0836e\") " pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:06 crc kubenswrapper[4861]: I0129 08:04:06.902779 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:07 crc kubenswrapper[4861]: I0129 08:04:07.474545 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6587f5774c-8wdxx"]
Jan 29 08:04:08 crc kubenswrapper[4861]: I0129 08:04:08.469518 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6587f5774c-8wdxx" event={"ID":"abc77fef-ccca-488a-af89-450aa3b0836e","Type":"ContainerStarted","Data":"3c7b7c95e896a9c96ff0da7a58f74fe6f40c4fb1515dfd68556ab35e08581d04"}
Jan 29 08:04:08 crc kubenswrapper[4861]: I0129 08:04:08.469882 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6587f5774c-8wdxx" event={"ID":"abc77fef-ccca-488a-af89-450aa3b0836e","Type":"ContainerStarted","Data":"eebd87459fcc20049ef4075b9850ebd42f463859217a57bacff21121d2188cd7"}
Jan 29 08:04:08 crc kubenswrapper[4861]: I0129 08:04:08.470026 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:08 crc kubenswrapper[4861]: I0129 08:04:08.500383 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6587f5774c-8wdxx" podStartSLOduration=2.500359735 podStartE2EDuration="2.500359735s" podCreationTimestamp="2026-01-29 08:04:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:04:08.495839606 +0000 UTC m=+5340.167334233" watchObservedRunningTime="2026-01-29 08:04:08.500359735 +0000 UTC m=+5340.171854302"
Jan 29 08:04:38 crc kubenswrapper[4861]: I0129 08:04:38.466118 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6587f5774c-8wdxx"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.464669 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.466964 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
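
The keystone pod goes from readiness status="" at 08:04:08 to "ready" at 08:04:38, a ~30s gap that is consistent with a readiness probe whose first success comes after an initial delay. The pod's actual probe configuration is not in the log, so the sketch below is an assumed illustration only (endpoint, port, and timings are hypothetical; on current API versions the handler field is named ProbeHandler):

// Sketch (assumed values): a readiness probe that would produce the
// ""->"ready" transition roughly 30s after container start.
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func keystoneReadiness() *corev1.Probe {
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/v3",                // hypothetical endpoint
				Port: intstr.FromInt(5000), // hypothetical port
			},
		},
		InitialDelaySeconds: 30, // assumed
		PeriodSeconds:       10, // assumed
	}
}
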
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.469882 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.469859 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.470512 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-9bvg4"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.476968 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.546407 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.546515 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config-secret\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.546662 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.546691 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgf7d\" (UniqueName: \"kubernetes.io/projected/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-kube-api-access-rgf7d\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.648395 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config-secret\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.648537 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.648571 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgf7d\" (UniqueName: \"kubernetes.io/projected/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-kube-api-access-rgf7d\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.648612 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.649959 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.656499 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-combined-ca-bundle\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.669530 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config-secret\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.670271 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgf7d\" (UniqueName: \"kubernetes.io/projected/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-kube-api-access-rgf7d\") pod \"openstackclient\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") " pod="openstack/openstackclient"
Jan 29 08:04:41 crc kubenswrapper[4861]: I0129 08:04:41.818580 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 29 08:04:42 crc kubenswrapper[4861]: I0129 08:04:42.249965 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 29 08:04:42 crc kubenswrapper[4861]: W0129 08:04:42.254586 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ce50e42_2140_459d_9bfe_59c23bcb8ae2.slice/crio-b0cdb4b487ba687ae839d47fc80a8cc9439b033c2a8716fe8e0132b25bc412b8 WatchSource:0}: Error finding container b0cdb4b487ba687ae839d47fc80a8cc9439b033c2a8716fe8e0132b25bc412b8: Status 404 returned error can't find the container with id b0cdb4b487ba687ae839d47fc80a8cc9439b033c2a8716fe8e0132b25bc412b8
Jan 29 08:04:42 crc kubenswrapper[4861]: I0129 08:04:42.784930 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"9ce50e42-2140-459d-9bfe-59c23bcb8ae2","Type":"ContainerStarted","Data":"a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54"}
Jan 29 08:04:42 crc kubenswrapper[4861]: I0129 08:04:42.785418 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"9ce50e42-2140-459d-9bfe-59c23bcb8ae2","Type":"ContainerStarted","Data":"b0cdb4b487ba687ae839d47fc80a8cc9439b033c2a8716fe8e0132b25bc412b8"}
Jan 29 08:04:42 crc kubenswrapper[4861]: I0129 08:04:42.804292 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.8042691830000002 podStartE2EDuration="1.804269183s" podCreationTimestamp="2026-01-29 08:04:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:04:42.802039195 +0000 UTC m=+5374.473533792" watchObservedRunningTime="2026-01-29 08:04:42.804269183 +0000 UTC m=+5374.475763750"
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.324974 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ppb79"]
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.327358 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.334536 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ppb79"]
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.410259 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zznn\" (UniqueName: \"kubernetes.io/projected/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-kube-api-access-9zznn\") pod \"community-operators-ppb79\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.410363 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-catalog-content\") pod \"community-operators-ppb79\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.410382 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-utilities\") pod \"community-operators-ppb79\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.511630 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-catalog-content\") pod \"community-operators-ppb79\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.511920 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-utilities\") pod \"community-operators-ppb79\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.512098 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zznn\" (UniqueName: \"kubernetes.io/projected/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-kube-api-access-9zznn\") pod \"community-operators-ppb79\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.512365 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-utilities\") pod \"community-operators-ppb79\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.512609 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-catalog-content\") pod \"community-operators-ppb79\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.539966 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zznn\" (UniqueName: \"kubernetes.io/projected/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-kube-api-access-9zznn\") pod \"community-operators-ppb79\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:03 crc kubenswrapper[4861]: I0129 08:05:03.647554 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:04 crc kubenswrapper[4861]: I0129 08:05:04.164419 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ppb79"]
Jan 29 08:05:05 crc kubenswrapper[4861]: I0129 08:05:05.020241 4861 generic.go:334] "Generic (PLEG): container finished" podID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" containerID="2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95" exitCode=0
Jan 29 08:05:05 crc kubenswrapper[4861]: I0129 08:05:05.020277 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ppb79" event={"ID":"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc","Type":"ContainerDied","Data":"2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95"}
Jan 29 08:05:05 crc kubenswrapper[4861]: I0129 08:05:05.020539 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ppb79" event={"ID":"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc","Type":"ContainerStarted","Data":"0ceccb892959c11e856ee19257bf7a1cd17feecc60bce10f06a5e09f12d25571"}
Jan 29 08:05:05 crc kubenswrapper[4861]: I0129 08:05:05.022657 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 08:05:06 crc kubenswrapper[4861]: I0129 08:05:06.029684 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ppb79" event={"ID":"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc","Type":"ContainerStarted","Data":"8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374"}
Jan 29 08:05:07 crc kubenswrapper[4861]: I0129 08:05:07.037682 4861 generic.go:334] "Generic (PLEG): container finished" podID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" containerID="8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374" exitCode=0
Jan 29 08:05:07 crc kubenswrapper[4861]: I0129 08:05:07.037727 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ppb79" event={"ID":"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc","Type":"ContainerDied","Data":"8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374"}
Jan 29 08:05:08 crc kubenswrapper[4861]: I0129 08:05:08.051296 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ppb79" event={"ID":"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc","Type":"ContainerStarted","Data":"6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4"}
Jan 29 08:05:08 crc kubenswrapper[4861]: I0129 08:05:08.072568 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ppb79" podStartSLOduration=2.647941982 podStartE2EDuration="5.072537678s" podCreationTimestamp="2026-01-29 08:05:03 +0000 UTC" firstStartedPulling="2026-01-29 08:05:05.022186232 +0000 UTC m=+5396.693680819" lastFinishedPulling="2026-01-29 08:05:07.446781958 +0000 UTC m=+5399.118276515" observedRunningTime="2026-01-29 08:05:08.068570754 +0000 UTC m=+5399.740065321" watchObservedRunningTime="2026-01-29 08:05:08.072537678 +0000 UTC m=+5399.744032245"
Jan 29 08:05:13 crc kubenswrapper[4861]: I0129 08:05:13.648398 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:13 crc kubenswrapper[4861]: I0129 08:05:13.649092 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:13 crc kubenswrapper[4861]: I0129 08:05:13.729337 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:14 crc kubenswrapper[4861]: I0129 08:05:14.169203 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:14 crc kubenswrapper[4861]: I0129 08:05:14.914156 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ppb79"]
Jan 29 08:05:16 crc kubenswrapper[4861]: I0129 08:05:16.137769 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ppb79" podUID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" containerName="registry-server" containerID="cri-o://6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4" gracePeriod=2
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.091331 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.151311 4861 generic.go:334] "Generic (PLEG): container finished" podID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" containerID="6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4" exitCode=0
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.151377 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ppb79" event={"ID":"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc","Type":"ContainerDied","Data":"6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4"}
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.151416 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ppb79"
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.151447 4861 scope.go:117] "RemoveContainer" containerID="6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4"
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.151428 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ppb79" event={"ID":"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc","Type":"ContainerDied","Data":"0ceccb892959c11e856ee19257bf7a1cd17feecc60bce10f06a5e09f12d25571"}
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.184025 4861 scope.go:117] "RemoveContainer" containerID="8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374"
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.213008 4861 scope.go:117] "RemoveContainer" containerID="2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95"
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.255282 4861 scope.go:117] "RemoveContainer" containerID="6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4"
Jan 29 08:05:17 crc kubenswrapper[4861]: E0129 08:05:17.255732 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4\": container with ID starting with 6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4 not found: ID does not exist" containerID="6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4"
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.255781 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4"} err="failed to get container status \"6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4\": rpc error: code = NotFound desc = could not find container \"6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4\": container with ID starting with 6dac63fbf8d904bae060f1244e1237d244ea6e698ef9b518e6e80cc3b02ac3f4 not found: ID does not exist"
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.255815 4861 scope.go:117] "RemoveContainer" containerID="8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374"
Jan 29 08:05:17 crc kubenswrapper[4861]: E0129 08:05:17.256344 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374\": container with ID starting with 8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374 not found: ID does not exist" containerID="8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374"
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.256408 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374"} err="failed to get container status \"8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374\": rpc error: code = NotFound desc = could not find container \"8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374\": container with ID starting with 8f242a75a264536491cdb8e1de1f10555646f2b9fef0b62913682903c45d2374 not found: ID does not exist"
Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.256450 4861 scope.go:117] "RemoveContainer" containerID="2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95"
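
The paired E/I lines above are benign: the kubelet asks the runtime for the status of containers CRI-O has already removed, and the gRPC NotFound code comes back. Distinguishing that case from a real CRI failure is a standard status-code check; a minimal sketch using the gRPC status package (this mirrors the idea, it is not kubelet's actual code path):

// Sketch: treating NotFound from a CRI call as "already gone".
package sketch

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isContainerGone reports whether err is the benign NotFound seen in the log.
func isContainerGone(err error) bool {
	return status.Code(err) == codes.NotFound
}
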
containerID="2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95" Jan 29 08:05:17 crc kubenswrapper[4861]: E0129 08:05:17.256821 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95\": container with ID starting with 2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95 not found: ID does not exist" containerID="2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95" Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.256849 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95"} err="failed to get container status \"2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95\": rpc error: code = NotFound desc = could not find container \"2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95\": container with ID starting with 2a8dceae42b242526c81c0f986f63cc420b049493cdd12839bd28ebdac0c8a95 not found: ID does not exist" Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.286725 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-utilities\") pod \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.286945 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zznn\" (UniqueName: \"kubernetes.io/projected/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-kube-api-access-9zznn\") pod \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.287012 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-catalog-content\") pod \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\" (UID: \"6614b17a-0187-4f7b-a5a0-d4d7a856c3cc\") " Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.288420 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-utilities" (OuterVolumeSpecName: "utilities") pod "6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" (UID: "6614b17a-0187-4f7b-a5a0-d4d7a856c3cc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.289665 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.296489 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-kube-api-access-9zznn" (OuterVolumeSpecName: "kube-api-access-9zznn") pod "6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" (UID: "6614b17a-0187-4f7b-a5a0-d4d7a856c3cc"). InnerVolumeSpecName "kube-api-access-9zznn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.346692 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" (UID: "6614b17a-0187-4f7b-a5a0-d4d7a856c3cc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.390870 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.390911 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zznn\" (UniqueName: \"kubernetes.io/projected/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc-kube-api-access-9zznn\") on node \"crc\" DevicePath \"\"" Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.492408 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ppb79"] Jan 29 08:05:17 crc kubenswrapper[4861]: I0129 08:05:17.510296 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ppb79"] Jan 29 08:05:19 crc kubenswrapper[4861]: I0129 08:05:19.136870 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" path="/var/lib/kubelet/pods/6614b17a-0187-4f7b-a5a0-d4d7a856c3cc/volumes" Jan 29 08:05:45 crc kubenswrapper[4861]: E0129 08:05:45.065517 4861 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.80:59580->38.102.83.80:46667: write tcp 38.102.83.80:59580->38.102.83.80:46667: write: broken pipe Jan 29 08:06:00 crc kubenswrapper[4861]: I0129 08:06:00.629815 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:06:00 crc kubenswrapper[4861]: I0129 08:06:00.631433 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.572621 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-67zds"] Jan 29 08:06:17 crc kubenswrapper[4861]: E0129 08:06:17.573417 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" containerName="extract-content" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.573429 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" containerName="extract-content" Jan 29 08:06:17 crc kubenswrapper[4861]: E0129 08:06:17.573443 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" containerName="extract-utilities" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.573449 4861 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" containerName="extract-utilities" Jan 29 08:06:17 crc kubenswrapper[4861]: E0129 08:06:17.573469 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" containerName="registry-server" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.573476 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" containerName="registry-server" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.573645 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="6614b17a-0187-4f7b-a5a0-d4d7a856c3cc" containerName="registry-server" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.574922 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.598686 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-67zds"] Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.772550 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-utilities\") pod \"redhat-operators-67zds\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.772633 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-catalog-content\") pod \"redhat-operators-67zds\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.772673 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sftx\" (UniqueName: \"kubernetes.io/projected/94cb18e9-8205-47ce-af00-713abf4bae34-kube-api-access-8sftx\") pod \"redhat-operators-67zds\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.874495 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-utilities\") pod \"redhat-operators-67zds\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.874578 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-catalog-content\") pod \"redhat-operators-67zds\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.874620 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sftx\" (UniqueName: \"kubernetes.io/projected/94cb18e9-8205-47ce-af00-713abf4bae34-kube-api-access-8sftx\") pod \"redhat-operators-67zds\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.874983 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-catalog-content\") pod \"redhat-operators-67zds\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.875130 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-utilities\") pod \"redhat-operators-67zds\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.907214 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sftx\" (UniqueName: \"kubernetes.io/projected/94cb18e9-8205-47ce-af00-713abf4bae34-kube-api-access-8sftx\") pod \"redhat-operators-67zds\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:17 crc kubenswrapper[4861]: I0129 08:06:17.951959 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:18 crc kubenswrapper[4861]: I0129 08:06:18.410507 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-67zds"] Jan 29 08:06:18 crc kubenswrapper[4861]: I0129 08:06:18.806065 4861 generic.go:334] "Generic (PLEG): container finished" podID="94cb18e9-8205-47ce-af00-713abf4bae34" containerID="bee7785ea40fd8f7500020db016176c31adcb0b468784f2898f784fc40391071" exitCode=0 Jan 29 08:06:18 crc kubenswrapper[4861]: I0129 08:06:18.806128 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67zds" event={"ID":"94cb18e9-8205-47ce-af00-713abf4bae34","Type":"ContainerDied","Data":"bee7785ea40fd8f7500020db016176c31adcb0b468784f2898f784fc40391071"} Jan 29 08:06:18 crc kubenswrapper[4861]: I0129 08:06:18.806180 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67zds" event={"ID":"94cb18e9-8205-47ce-af00-713abf4bae34","Type":"ContainerStarted","Data":"63629f082e4a1aa6bf3ef3b583cc5d11bfee663890e3e60e605107a64b654dc6"} Jan 29 08:06:19 crc kubenswrapper[4861]: I0129 08:06:19.814905 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67zds" event={"ID":"94cb18e9-8205-47ce-af00-713abf4bae34","Type":"ContainerStarted","Data":"0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a"} Jan 29 08:06:20 crc kubenswrapper[4861]: I0129 08:06:20.834119 4861 generic.go:334] "Generic (PLEG): container finished" podID="94cb18e9-8205-47ce-af00-713abf4bae34" containerID="0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a" exitCode=0 Jan 29 08:06:20 crc kubenswrapper[4861]: I0129 08:06:20.834209 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67zds" event={"ID":"94cb18e9-8205-47ce-af00-713abf4bae34","Type":"ContainerDied","Data":"0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a"} Jan 29 08:06:21 crc kubenswrapper[4861]: I0129 08:06:21.844845 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67zds" event={"ID":"94cb18e9-8205-47ce-af00-713abf4bae34","Type":"ContainerStarted","Data":"25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95"} Jan 29 08:06:21 crc 
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.492747 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-bbs2g"]
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.493670 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-bbs2g"
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.511343 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-bbs2g"]
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.601302 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-a323-account-create-update-qmmlf"]
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.602603 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a323-account-create-update-qmmlf"
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.603942 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret"
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.618005 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-a323-account-create-update-qmmlf"]
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.667866 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsjzt\" (UniqueName: \"kubernetes.io/projected/00b75b77-6e85-4af7-be07-ed3bd7c338db-kube-api-access-dsjzt\") pod \"barbican-db-create-bbs2g\" (UID: \"00b75b77-6e85-4af7-be07-ed3bd7c338db\") " pod="openstack/barbican-db-create-bbs2g"
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.667936 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00b75b77-6e85-4af7-be07-ed3bd7c338db-operator-scripts\") pod \"barbican-db-create-bbs2g\" (UID: \"00b75b77-6e85-4af7-be07-ed3bd7c338db\") " pod="openstack/barbican-db-create-bbs2g"
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.769421 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsjzt\" (UniqueName: \"kubernetes.io/projected/00b75b77-6e85-4af7-be07-ed3bd7c338db-kube-api-access-dsjzt\") pod \"barbican-db-create-bbs2g\" (UID: \"00b75b77-6e85-4af7-be07-ed3bd7c338db\") " pod="openstack/barbican-db-create-bbs2g"
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.769496 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmxpj\" (UniqueName: \"kubernetes.io/projected/134b5d13-5549-459c-8a27-2e5908651fb0-kube-api-access-tmxpj\") pod \"barbican-a323-account-create-update-qmmlf\" (UID: \"134b5d13-5549-459c-8a27-2e5908651fb0\") " pod="openstack/barbican-a323-account-create-update-qmmlf"
Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.769548 4861
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00b75b77-6e85-4af7-be07-ed3bd7c338db-operator-scripts\") pod \"barbican-db-create-bbs2g\" (UID: \"00b75b77-6e85-4af7-be07-ed3bd7c338db\") " pod="openstack/barbican-db-create-bbs2g" Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.769641 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/134b5d13-5549-459c-8a27-2e5908651fb0-operator-scripts\") pod \"barbican-a323-account-create-update-qmmlf\" (UID: \"134b5d13-5549-459c-8a27-2e5908651fb0\") " pod="openstack/barbican-a323-account-create-update-qmmlf" Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.771489 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00b75b77-6e85-4af7-be07-ed3bd7c338db-operator-scripts\") pod \"barbican-db-create-bbs2g\" (UID: \"00b75b77-6e85-4af7-be07-ed3bd7c338db\") " pod="openstack/barbican-db-create-bbs2g" Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.801639 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsjzt\" (UniqueName: \"kubernetes.io/projected/00b75b77-6e85-4af7-be07-ed3bd7c338db-kube-api-access-dsjzt\") pod \"barbican-db-create-bbs2g\" (UID: \"00b75b77-6e85-4af7-be07-ed3bd7c338db\") " pod="openstack/barbican-db-create-bbs2g" Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.856273 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-bbs2g" Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.871134 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmxpj\" (UniqueName: \"kubernetes.io/projected/134b5d13-5549-459c-8a27-2e5908651fb0-kube-api-access-tmxpj\") pod \"barbican-a323-account-create-update-qmmlf\" (UID: \"134b5d13-5549-459c-8a27-2e5908651fb0\") " pod="openstack/barbican-a323-account-create-update-qmmlf" Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.871207 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/134b5d13-5549-459c-8a27-2e5908651fb0-operator-scripts\") pod \"barbican-a323-account-create-update-qmmlf\" (UID: \"134b5d13-5549-459c-8a27-2e5908651fb0\") " pod="openstack/barbican-a323-account-create-update-qmmlf" Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.871810 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/134b5d13-5549-459c-8a27-2e5908651fb0-operator-scripts\") pod \"barbican-a323-account-create-update-qmmlf\" (UID: \"134b5d13-5549-459c-8a27-2e5908651fb0\") " pod="openstack/barbican-a323-account-create-update-qmmlf" Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.897781 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmxpj\" (UniqueName: \"kubernetes.io/projected/134b5d13-5549-459c-8a27-2e5908651fb0-kube-api-access-tmxpj\") pod \"barbican-a323-account-create-update-qmmlf\" (UID: \"134b5d13-5549-459c-8a27-2e5908651fb0\") " pod="openstack/barbican-a323-account-create-update-qmmlf" Jan 29 08:06:22 crc kubenswrapper[4861]: I0129 08:06:22.923520 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-a323-account-create-update-qmmlf" Jan 29 08:06:23 crc kubenswrapper[4861]: I0129 08:06:23.208241 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-a323-account-create-update-qmmlf"] Jan 29 08:06:23 crc kubenswrapper[4861]: I0129 08:06:23.333976 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-bbs2g"] Jan 29 08:06:23 crc kubenswrapper[4861]: W0129 08:06:23.337208 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00b75b77_6e85_4af7_be07_ed3bd7c338db.slice/crio-d412a455ac831ef570257173fc6f07e873af3ee69f669c6071de8f1dfa8184fc WatchSource:0}: Error finding container d412a455ac831ef570257173fc6f07e873af3ee69f669c6071de8f1dfa8184fc: Status 404 returned error can't find the container with id d412a455ac831ef570257173fc6f07e873af3ee69f669c6071de8f1dfa8184fc Jan 29 08:06:23 crc kubenswrapper[4861]: I0129 08:06:23.870844 4861 generic.go:334] "Generic (PLEG): container finished" podID="134b5d13-5549-459c-8a27-2e5908651fb0" containerID="825ae3007e911e3b112efced3bd22de746000938f42aa19c9ececbace473b492" exitCode=0 Jan 29 08:06:23 crc kubenswrapper[4861]: I0129 08:06:23.870952 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a323-account-create-update-qmmlf" event={"ID":"134b5d13-5549-459c-8a27-2e5908651fb0","Type":"ContainerDied","Data":"825ae3007e911e3b112efced3bd22de746000938f42aa19c9ececbace473b492"} Jan 29 08:06:23 crc kubenswrapper[4861]: I0129 08:06:23.872331 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a323-account-create-update-qmmlf" event={"ID":"134b5d13-5549-459c-8a27-2e5908651fb0","Type":"ContainerStarted","Data":"6bb185ef269819f99b9fea0be4db1405aac9ea5fefd3549685ab7c19cbb6f36e"} Jan 29 08:06:23 crc kubenswrapper[4861]: I0129 08:06:23.876172 4861 generic.go:334] "Generic (PLEG): container finished" podID="00b75b77-6e85-4af7-be07-ed3bd7c338db" containerID="40a15692810876a8d71c0c27b58259b2f6e27e1935f49a39d3c1ca79b8166970" exitCode=0 Jan 29 08:06:23 crc kubenswrapper[4861]: I0129 08:06:23.876211 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-bbs2g" event={"ID":"00b75b77-6e85-4af7-be07-ed3bd7c338db","Type":"ContainerDied","Data":"40a15692810876a8d71c0c27b58259b2f6e27e1935f49a39d3c1ca79b8166970"} Jan 29 08:06:23 crc kubenswrapper[4861]: I0129 08:06:23.876232 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-bbs2g" event={"ID":"00b75b77-6e85-4af7-be07-ed3bd7c338db","Type":"ContainerStarted","Data":"d412a455ac831ef570257173fc6f07e873af3ee69f669c6071de8f1dfa8184fc"} Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.346332 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-bbs2g" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.351665 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-a323-account-create-update-qmmlf" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.437831 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00b75b77-6e85-4af7-be07-ed3bd7c338db-operator-scripts\") pod \"00b75b77-6e85-4af7-be07-ed3bd7c338db\" (UID: \"00b75b77-6e85-4af7-be07-ed3bd7c338db\") " Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.437912 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dsjzt\" (UniqueName: \"kubernetes.io/projected/00b75b77-6e85-4af7-be07-ed3bd7c338db-kube-api-access-dsjzt\") pod \"00b75b77-6e85-4af7-be07-ed3bd7c338db\" (UID: \"00b75b77-6e85-4af7-be07-ed3bd7c338db\") " Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.437949 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/134b5d13-5549-459c-8a27-2e5908651fb0-operator-scripts\") pod \"134b5d13-5549-459c-8a27-2e5908651fb0\" (UID: \"134b5d13-5549-459c-8a27-2e5908651fb0\") " Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.438310 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmxpj\" (UniqueName: \"kubernetes.io/projected/134b5d13-5549-459c-8a27-2e5908651fb0-kube-api-access-tmxpj\") pod \"134b5d13-5549-459c-8a27-2e5908651fb0\" (UID: \"134b5d13-5549-459c-8a27-2e5908651fb0\") " Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.438741 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00b75b77-6e85-4af7-be07-ed3bd7c338db-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "00b75b77-6e85-4af7-be07-ed3bd7c338db" (UID: "00b75b77-6e85-4af7-be07-ed3bd7c338db"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.439362 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00b75b77-6e85-4af7-be07-ed3bd7c338db-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.439359 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/134b5d13-5549-459c-8a27-2e5908651fb0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "134b5d13-5549-459c-8a27-2e5908651fb0" (UID: "134b5d13-5549-459c-8a27-2e5908651fb0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.447170 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/134b5d13-5549-459c-8a27-2e5908651fb0-kube-api-access-tmxpj" (OuterVolumeSpecName: "kube-api-access-tmxpj") pod "134b5d13-5549-459c-8a27-2e5908651fb0" (UID: "134b5d13-5549-459c-8a27-2e5908651fb0"). InnerVolumeSpecName "kube-api-access-tmxpj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.449190 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00b75b77-6e85-4af7-be07-ed3bd7c338db-kube-api-access-dsjzt" (OuterVolumeSpecName: "kube-api-access-dsjzt") pod "00b75b77-6e85-4af7-be07-ed3bd7c338db" (UID: "00b75b77-6e85-4af7-be07-ed3bd7c338db"). 
InnerVolumeSpecName "kube-api-access-dsjzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.541317 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmxpj\" (UniqueName: \"kubernetes.io/projected/134b5d13-5549-459c-8a27-2e5908651fb0-kube-api-access-tmxpj\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.541360 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dsjzt\" (UniqueName: \"kubernetes.io/projected/00b75b77-6e85-4af7-be07-ed3bd7c338db-kube-api-access-dsjzt\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.541373 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/134b5d13-5549-459c-8a27-2e5908651fb0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.909357 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a323-account-create-update-qmmlf" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.909364 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a323-account-create-update-qmmlf" event={"ID":"134b5d13-5549-459c-8a27-2e5908651fb0","Type":"ContainerDied","Data":"6bb185ef269819f99b9fea0be4db1405aac9ea5fefd3549685ab7c19cbb6f36e"} Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.909680 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6bb185ef269819f99b9fea0be4db1405aac9ea5fefd3549685ab7c19cbb6f36e" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.912186 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-bbs2g" event={"ID":"00b75b77-6e85-4af7-be07-ed3bd7c338db","Type":"ContainerDied","Data":"d412a455ac831ef570257173fc6f07e873af3ee69f669c6071de8f1dfa8184fc"} Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.912217 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-bbs2g" Jan 29 08:06:25 crc kubenswrapper[4861]: I0129 08:06:25.912235 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d412a455ac831ef570257173fc6f07e873af3ee69f669c6071de8f1dfa8184fc" Jan 29 08:06:27 crc kubenswrapper[4861]: I0129 08:06:27.953333 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:27 crc kubenswrapper[4861]: I0129 08:06:27.953695 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:27 crc kubenswrapper[4861]: I0129 08:06:27.979740 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-z58jp"] Jan 29 08:06:27 crc kubenswrapper[4861]: E0129 08:06:27.980056 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="134b5d13-5549-459c-8a27-2e5908651fb0" containerName="mariadb-account-create-update" Jan 29 08:06:27 crc kubenswrapper[4861]: I0129 08:06:27.980069 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="134b5d13-5549-459c-8a27-2e5908651fb0" containerName="mariadb-account-create-update" Jan 29 08:06:27 crc kubenswrapper[4861]: E0129 08:06:27.980094 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b75b77-6e85-4af7-be07-ed3bd7c338db" containerName="mariadb-database-create" Jan 29 08:06:27 crc kubenswrapper[4861]: I0129 08:06:27.980100 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b75b77-6e85-4af7-be07-ed3bd7c338db" containerName="mariadb-database-create" Jan 29 08:06:27 crc kubenswrapper[4861]: I0129 08:06:27.980275 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="00b75b77-6e85-4af7-be07-ed3bd7c338db" containerName="mariadb-database-create" Jan 29 08:06:27 crc kubenswrapper[4861]: I0129 08:06:27.980291 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="134b5d13-5549-459c-8a27-2e5908651fb0" containerName="mariadb-account-create-update" Jan 29 08:06:27 crc kubenswrapper[4861]: I0129 08:06:27.980794 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:27 crc kubenswrapper[4861]: I0129 08:06:27.982609 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-cb5r8" Jan 29 08:06:27 crc kubenswrapper[4861]: I0129 08:06:27.985039 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 29 08:06:27 crc kubenswrapper[4861]: I0129 08:06:27.995403 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-z58jp"] Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.081937 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-combined-ca-bundle\") pod \"barbican-db-sync-z58jp\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") " pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.081982 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-db-sync-config-data\") pod \"barbican-db-sync-z58jp\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") " pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.082088 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8kqv\" (UniqueName: \"kubernetes.io/projected/b81a3989-af02-4421-8674-c39b2dd81601-kube-api-access-h8kqv\") pod \"barbican-db-sync-z58jp\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") " pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.184395 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8kqv\" (UniqueName: \"kubernetes.io/projected/b81a3989-af02-4421-8674-c39b2dd81601-kube-api-access-h8kqv\") pod \"barbican-db-sync-z58jp\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") " pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.184868 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-combined-ca-bundle\") pod \"barbican-db-sync-z58jp\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") " pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.184982 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-db-sync-config-data\") pod \"barbican-db-sync-z58jp\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") " pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.193063 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-db-sync-config-data\") pod \"barbican-db-sync-z58jp\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") " pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.193568 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-combined-ca-bundle\") pod \"barbican-db-sync-z58jp\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") " pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.204576 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8kqv\" (UniqueName: \"kubernetes.io/projected/b81a3989-af02-4421-8674-c39b2dd81601-kube-api-access-h8kqv\") pod \"barbican-db-sync-z58jp\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") " pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.349176 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.815375 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-z58jp"] Jan 29 08:06:28 crc kubenswrapper[4861]: I0129 08:06:28.942584 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-z58jp" event={"ID":"b81a3989-af02-4421-8674-c39b2dd81601","Type":"ContainerStarted","Data":"c0b9ef8f2d5c85af2e0da560923e0645aa5ff8da1fe32bdfb6f42e1adce7481c"} Jan 29 08:06:29 crc kubenswrapper[4861]: I0129 08:06:29.078398 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-67zds" podUID="94cb18e9-8205-47ce-af00-713abf4bae34" containerName="registry-server" probeResult="failure" output=< Jan 29 08:06:29 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 08:06:29 crc kubenswrapper[4861]: > Jan 29 08:06:29 crc kubenswrapper[4861]: I0129 08:06:29.950524 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-z58jp" event={"ID":"b81a3989-af02-4421-8674-c39b2dd81601","Type":"ContainerStarted","Data":"df36bfb544ce387d3f722be4b0fdd07d6a5a55b91e45f6b42941a822fd332436"} Jan 29 08:06:29 crc kubenswrapper[4861]: I0129 08:06:29.975590 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-z58jp" podStartSLOduration=2.975564107 podStartE2EDuration="2.975564107s" podCreationTimestamp="2026-01-29 08:06:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:06:29.97188339 +0000 UTC m=+5481.643377967" watchObservedRunningTime="2026-01-29 08:06:29.975564107 +0000 UTC m=+5481.647058664" Jan 29 08:06:30 crc kubenswrapper[4861]: I0129 08:06:30.630116 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:06:30 crc kubenswrapper[4861]: I0129 08:06:30.630197 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:06:32 crc kubenswrapper[4861]: I0129 08:06:32.993873 4861 generic.go:334] "Generic (PLEG): container finished" podID="b81a3989-af02-4421-8674-c39b2dd81601" containerID="df36bfb544ce387d3f722be4b0fdd07d6a5a55b91e45f6b42941a822fd332436" exitCode=0 Jan 29 08:06:32 crc 
Jan 29 08:06:34 crc kubenswrapper[4861]: I0129 08:06:34.346519 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-z58jp"
Jan 29 08:06:34 crc kubenswrapper[4861]: I0129 08:06:34.511437 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8kqv\" (UniqueName: \"kubernetes.io/projected/b81a3989-af02-4421-8674-c39b2dd81601-kube-api-access-h8kqv\") pod \"b81a3989-af02-4421-8674-c39b2dd81601\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") "
Jan 29 08:06:34 crc kubenswrapper[4861]: I0129 08:06:34.511760 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-db-sync-config-data\") pod \"b81a3989-af02-4421-8674-c39b2dd81601\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") "
Jan 29 08:06:34 crc kubenswrapper[4861]: I0129 08:06:34.511824 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-combined-ca-bundle\") pod \"b81a3989-af02-4421-8674-c39b2dd81601\" (UID: \"b81a3989-af02-4421-8674-c39b2dd81601\") "
Jan 29 08:06:34 crc kubenswrapper[4861]: I0129 08:06:34.519131 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b81a3989-af02-4421-8674-c39b2dd81601" (UID: "b81a3989-af02-4421-8674-c39b2dd81601"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:06:34 crc kubenswrapper[4861]: I0129 08:06:34.519788 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b81a3989-af02-4421-8674-c39b2dd81601-kube-api-access-h8kqv" (OuterVolumeSpecName: "kube-api-access-h8kqv") pod "b81a3989-af02-4421-8674-c39b2dd81601" (UID: "b81a3989-af02-4421-8674-c39b2dd81601"). InnerVolumeSpecName "kube-api-access-h8kqv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:06:34 crc kubenswrapper[4861]: I0129 08:06:34.563914 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b81a3989-af02-4421-8674-c39b2dd81601" (UID: "b81a3989-af02-4421-8674-c39b2dd81601"). InnerVolumeSpecName "combined-ca-bundle".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:06:34 crc kubenswrapper[4861]: I0129 08:06:34.615312 4861 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:34 crc kubenswrapper[4861]: I0129 08:06:34.615399 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b81a3989-af02-4421-8674-c39b2dd81601-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:34 crc kubenswrapper[4861]: I0129 08:06:34.615442 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8kqv\" (UniqueName: \"kubernetes.io/projected/b81a3989-af02-4421-8674-c39b2dd81601-kube-api-access-h8kqv\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.020249 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-z58jp" event={"ID":"b81a3989-af02-4421-8674-c39b2dd81601","Type":"ContainerDied","Data":"c0b9ef8f2d5c85af2e0da560923e0645aa5ff8da1fe32bdfb6f42e1adce7481c"} Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.020293 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0b9ef8f2d5c85af2e0da560923e0645aa5ff8da1fe32bdfb6f42e1adce7481c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.020319 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-z58jp" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.274606 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-c6c4999b6-q78x5"] Jan 29 08:06:35 crc kubenswrapper[4861]: E0129 08:06:35.278799 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b81a3989-af02-4421-8674-c39b2dd81601" containerName="barbican-db-sync" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.278829 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b81a3989-af02-4421-8674-c39b2dd81601" containerName="barbican-db-sync" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.279032 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b81a3989-af02-4421-8674-c39b2dd81601" containerName="barbican-db-sync" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.283616 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.287322 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-cb5r8" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.287548 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.287749 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.357671 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7749945477-kz7r5"] Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.392591 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-c6c4999b6-q78x5"] Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.392707 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.401682 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7749945477-kz7r5"] Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.406368 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.420115 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fcd9d5db5-mx8h4"] Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.424554 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.427253 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fcd9d5db5-mx8h4"] Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.436414 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-logs\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.436489 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfr4g\" (UniqueName: \"kubernetes.io/projected/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-kube-api-access-wfr4g\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.436541 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-config-data\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.436596 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-config-data-custom\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.436626 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-combined-ca-bundle\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.446036 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-554866789b-pmw2c"] Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.447873 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.454683 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.462574 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-554866789b-pmw2c"] Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.538248 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-dns-svc\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.538735 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-config-data-custom\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.538873 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1bf637d-8a99-466d-a782-c247e39e303e-logs\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.538962 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-config\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.539092 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crnlf\" (UniqueName: \"kubernetes.io/projected/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-kube-api-access-crnlf\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.539193 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-combined-ca-bundle\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.539265 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.539350 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data-custom\") pod 
\"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.539712 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-config-data\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.539810 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-combined-ca-bundle\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.539929 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-logs\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.540002 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-nb\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.540105 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-sb\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.540241 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfr4g\" (UniqueName: \"kubernetes.io/projected/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-kube-api-access-wfr4g\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.540295 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rrq5\" (UniqueName: \"kubernetes.io/projected/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-kube-api-access-8rrq5\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.540355 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trg5x\" (UniqueName: \"kubernetes.io/projected/a1bf637d-8a99-466d-a782-c247e39e303e-kube-api-access-trg5x\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.540426 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-logs\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.540425 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-logs\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.540473 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-combined-ca-bundle\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.540551 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-config-data\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.540588 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-config-data-custom\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.543387 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-combined-ca-bundle\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.544045 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-config-data-custom\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.555197 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-config-data\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.557942 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfr4g\" (UniqueName: \"kubernetes.io/projected/3b93d448-26b7-41f9-96aa-1aa8b0a6f134-kube-api-access-wfr4g\") pod \"barbican-keystone-listener-c6c4999b6-q78x5\" (UID: \"3b93d448-26b7-41f9-96aa-1aa8b0a6f134\") " 
pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.607058 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.642340 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1bf637d-8a99-466d-a782-c247e39e303e-logs\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.642913 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-config\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.642956 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crnlf\" (UniqueName: \"kubernetes.io/projected/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-kube-api-access-crnlf\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.642991 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643042 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data-custom\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643119 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-config-data\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643163 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-combined-ca-bundle\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643225 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-nb\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643279 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-sb\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643322 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1bf637d-8a99-466d-a782-c247e39e303e-logs\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643341 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rrq5\" (UniqueName: \"kubernetes.io/projected/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-kube-api-access-8rrq5\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643417 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trg5x\" (UniqueName: \"kubernetes.io/projected/a1bf637d-8a99-466d-a782-c247e39e303e-kube-api-access-trg5x\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643466 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-logs\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643493 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-combined-ca-bundle\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643548 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-config-data-custom\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643611 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-dns-svc\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.643842 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-config\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.644264 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-dns-svc\") pod 
\"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.644717 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-logs\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.646103 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-nb\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.650274 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-sb\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.651351 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-config-data-custom\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.653794 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.654526 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-config-data\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.657328 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-combined-ca-bundle\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.661465 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data-custom\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.662224 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rrq5\" (UniqueName: \"kubernetes.io/projected/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-kube-api-access-8rrq5\") pod \"dnsmasq-dns-7fcd9d5db5-mx8h4\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " 
pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.666692 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crnlf\" (UniqueName: \"kubernetes.io/projected/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-kube-api-access-crnlf\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.667436 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171-combined-ca-bundle\") pod \"barbican-worker-7749945477-kz7r5\" (UID: \"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171\") " pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.671543 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trg5x\" (UniqueName: \"kubernetes.io/projected/a1bf637d-8a99-466d-a782-c247e39e303e-kube-api-access-trg5x\") pod \"barbican-api-554866789b-pmw2c\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.721846 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7749945477-kz7r5" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.748616 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:35 crc kubenswrapper[4861]: I0129 08:06:35.780729 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:36 crc kubenswrapper[4861]: I0129 08:06:36.083553 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7749945477-kz7r5"] Jan 29 08:06:36 crc kubenswrapper[4861]: I0129 08:06:36.160740 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-c6c4999b6-q78x5"] Jan 29 08:06:36 crc kubenswrapper[4861]: I0129 08:06:36.440941 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fcd9d5db5-mx8h4"] Jan 29 08:06:36 crc kubenswrapper[4861]: I0129 08:06:36.494871 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-554866789b-pmw2c"] Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.061464 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-554866789b-pmw2c" event={"ID":"a1bf637d-8a99-466d-a782-c247e39e303e","Type":"ContainerStarted","Data":"62d007ca1f02d5ff71d0e481086eb43d47e48f093bc4190bc005189e46afece0"} Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.061785 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.061796 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.061804 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-554866789b-pmw2c" event={"ID":"a1bf637d-8a99-466d-a782-c247e39e303e","Type":"ContainerStarted","Data":"d54eff99b639162790874a64f2642cdf1b049c0c04b2f96344a42234e3a4b1f3"} Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.061816 4861 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/barbican-api-554866789b-pmw2c" event={"ID":"a1bf637d-8a99-466d-a782-c247e39e303e","Type":"ContainerStarted","Data":"c8df9907241d83a46601f1aaf60ac0905e8b2aa3a09ecda3dc613eea27b95ab1"} Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.070227 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" event={"ID":"3b93d448-26b7-41f9-96aa-1aa8b0a6f134","Type":"ContainerStarted","Data":"d203bd1003edaf31acb3125db40a8b5a121fe47c347fe26923c9ed6092cf858e"} Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.070263 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" event={"ID":"3b93d448-26b7-41f9-96aa-1aa8b0a6f134","Type":"ContainerStarted","Data":"0e4002d08f0b67a9033bd1c21e82049b9f9af7d0a5f2acc4bb63da926bc3833a"} Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.070275 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" event={"ID":"3b93d448-26b7-41f9-96aa-1aa8b0a6f134","Type":"ContainerStarted","Data":"0e7e0d903edbf5f598a8fc6895be3d6df8b2a30ac24c2eabb38007863b24acc3"} Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.073937 4861 generic.go:334] "Generic (PLEG): container finished" podID="f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" containerID="cba13597c8b4298714022ec7580caa0e33d70f8c2da15efb2e8975eb026a4537" exitCode=0 Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.074008 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" event={"ID":"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c","Type":"ContainerDied","Data":"cba13597c8b4298714022ec7580caa0e33d70f8c2da15efb2e8975eb026a4537"} Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.074033 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" event={"ID":"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c","Type":"ContainerStarted","Data":"c88ff5efc93252de47eb84b989273293e526b169ff434067d3e0d374f2532751"} Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.082477 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7749945477-kz7r5" event={"ID":"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171","Type":"ContainerStarted","Data":"70102294fbbd6b1c12b59310c838810ff6d313105b57d44cab26e3bc01be056e"} Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.082519 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7749945477-kz7r5" event={"ID":"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171","Type":"ContainerStarted","Data":"cda966bc2bba5878de86df04b3c36d3b2a304223d715ef67791f0c369f7ec51e"} Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.082531 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7749945477-kz7r5" event={"ID":"734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171","Type":"ContainerStarted","Data":"32bd080781756feef46e18ad120a41825c3b92083194605d4e8168c370751e09"} Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.102255 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-554866789b-pmw2c" podStartSLOduration=2.102232711 podStartE2EDuration="2.102232711s" podCreationTimestamp="2026-01-29 08:06:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:06:37.093472991 +0000 UTC m=+5488.764967548" 
watchObservedRunningTime="2026-01-29 08:06:37.102232711 +0000 UTC m=+5488.773727268" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.143156 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7749945477-kz7r5" podStartSLOduration=2.143130422 podStartE2EDuration="2.143130422s" podCreationTimestamp="2026-01-29 08:06:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:06:37.138586863 +0000 UTC m=+5488.810081420" watchObservedRunningTime="2026-01-29 08:06:37.143130422 +0000 UTC m=+5488.814624969" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.173127 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-c6c4999b6-q78x5" podStartSLOduration=2.173108697 podStartE2EDuration="2.173108697s" podCreationTimestamp="2026-01-29 08:06:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:06:37.16519199 +0000 UTC m=+5488.836686557" watchObservedRunningTime="2026-01-29 08:06:37.173108697 +0000 UTC m=+5488.844603254" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.795961 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6d596d8b5b-rdh75"] Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.798346 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.809013 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.809610 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.845796 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6d596d8b5b-rdh75"] Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.892506 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-public-tls-certs\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.892765 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-config-data\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.892821 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-combined-ca-bundle\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.892866 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-internal-tls-certs\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.892905 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-config-data-custom\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.892928 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce853f12-bafd-4fbc-90e9-ab2802a25722-logs\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.893007 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thsbt\" (UniqueName: \"kubernetes.io/projected/ce853f12-bafd-4fbc-90e9-ab2802a25722-kube-api-access-thsbt\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.994530 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-config-data-custom\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.994586 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce853f12-bafd-4fbc-90e9-ab2802a25722-logs\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.994609 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thsbt\" (UniqueName: \"kubernetes.io/projected/ce853f12-bafd-4fbc-90e9-ab2802a25722-kube-api-access-thsbt\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.994674 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-public-tls-certs\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.994694 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-config-data\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.994724 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-combined-ca-bundle\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.994764 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-internal-tls-certs\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:37 crc kubenswrapper[4861]: I0129 08:06:37.996318 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce853f12-bafd-4fbc-90e9-ab2802a25722-logs\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.000444 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-public-tls-certs\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.010683 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-config-data-custom\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.013337 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.015417 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-combined-ca-bundle\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.016361 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-config-data\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.016719 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce853f12-bafd-4fbc-90e9-ab2802a25722-internal-tls-certs\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.017638 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thsbt\" (UniqueName: \"kubernetes.io/projected/ce853f12-bafd-4fbc-90e9-ab2802a25722-kube-api-access-thsbt\") pod \"barbican-api-6d596d8b5b-rdh75\" (UID: \"ce853f12-bafd-4fbc-90e9-ab2802a25722\") " pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:38 crc 
kubenswrapper[4861]: I0129 08:06:38.079300 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.096683 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" event={"ID":"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c","Type":"ContainerStarted","Data":"bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1"} Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.097567 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.239047 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.246279 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" podStartSLOduration=3.246262626 podStartE2EDuration="3.246262626s" podCreationTimestamp="2026-01-29 08:06:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:06:38.123399318 +0000 UTC m=+5489.794893895" watchObservedRunningTime="2026-01-29 08:06:38.246262626 +0000 UTC m=+5489.917757183" Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.251364 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-67zds"] Jan 29 08:06:38 crc kubenswrapper[4861]: W0129 08:06:38.724350 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce853f12_bafd_4fbc_90e9_ab2802a25722.slice/crio-57f36c34614228196c7ab6bc2811195dfdc72a864f6315aaa55937dcfe5476fc WatchSource:0}: Error finding container 57f36c34614228196c7ab6bc2811195dfdc72a864f6315aaa55937dcfe5476fc: Status 404 returned error can't find the container with id 57f36c34614228196c7ab6bc2811195dfdc72a864f6315aaa55937dcfe5476fc Jan 29 08:06:38 crc kubenswrapper[4861]: I0129 08:06:38.728264 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6d596d8b5b-rdh75"] Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.115045 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6d596d8b5b-rdh75" event={"ID":"ce853f12-bafd-4fbc-90e9-ab2802a25722","Type":"ContainerStarted","Data":"fc502ae2e1a361b0cc367db06f92276f77d3bc8acf9c7ce3b998964108687390"} Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.115163 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-67zds" podUID="94cb18e9-8205-47ce-af00-713abf4bae34" containerName="registry-server" containerID="cri-o://25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95" gracePeriod=2 Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.135531 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6d596d8b5b-rdh75" event={"ID":"ce853f12-bafd-4fbc-90e9-ab2802a25722","Type":"ContainerStarted","Data":"57f36c34614228196c7ab6bc2811195dfdc72a864f6315aaa55937dcfe5476fc"} Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.480418 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.624722 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-utilities\") pod \"94cb18e9-8205-47ce-af00-713abf4bae34\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.624848 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-catalog-content\") pod \"94cb18e9-8205-47ce-af00-713abf4bae34\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.624873 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8sftx\" (UniqueName: \"kubernetes.io/projected/94cb18e9-8205-47ce-af00-713abf4bae34-kube-api-access-8sftx\") pod \"94cb18e9-8205-47ce-af00-713abf4bae34\" (UID: \"94cb18e9-8205-47ce-af00-713abf4bae34\") " Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.625700 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-utilities" (OuterVolumeSpecName: "utilities") pod "94cb18e9-8205-47ce-af00-713abf4bae34" (UID: "94cb18e9-8205-47ce-af00-713abf4bae34"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.628769 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94cb18e9-8205-47ce-af00-713abf4bae34-kube-api-access-8sftx" (OuterVolumeSpecName: "kube-api-access-8sftx") pod "94cb18e9-8205-47ce-af00-713abf4bae34" (UID: "94cb18e9-8205-47ce-af00-713abf4bae34"). InnerVolumeSpecName "kube-api-access-8sftx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.726695 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8sftx\" (UniqueName: \"kubernetes.io/projected/94cb18e9-8205-47ce-af00-713abf4bae34-kube-api-access-8sftx\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.726727 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.737324 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94cb18e9-8205-47ce-af00-713abf4bae34" (UID: "94cb18e9-8205-47ce-af00-713abf4bae34"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:06:39 crc kubenswrapper[4861]: I0129 08:06:39.828962 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94cb18e9-8205-47ce-af00-713abf4bae34-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.127277 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6d596d8b5b-rdh75" event={"ID":"ce853f12-bafd-4fbc-90e9-ab2802a25722","Type":"ContainerStarted","Data":"0a69709d69bc7a7da04085922e0c73b951675bf875a3cfd6cb415161b284d749"} Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.127452 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.127640 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.131307 4861 generic.go:334] "Generic (PLEG): container finished" podID="94cb18e9-8205-47ce-af00-713abf4bae34" containerID="25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95" exitCode=0 Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.131373 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67zds" event={"ID":"94cb18e9-8205-47ce-af00-713abf4bae34","Type":"ContainerDied","Data":"25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95"} Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.131391 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-67zds" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.131425 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67zds" event={"ID":"94cb18e9-8205-47ce-af00-713abf4bae34","Type":"ContainerDied","Data":"63629f082e4a1aa6bf3ef3b583cc5d11bfee663890e3e60e605107a64b654dc6"} Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.131472 4861 scope.go:117] "RemoveContainer" containerID="25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.151320 4861 scope.go:117] "RemoveContainer" containerID="0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.171394 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6d596d8b5b-rdh75" podStartSLOduration=3.171369609 podStartE2EDuration="3.171369609s" podCreationTimestamp="2026-01-29 08:06:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:06:40.154513848 +0000 UTC m=+5491.826008415" watchObservedRunningTime="2026-01-29 08:06:40.171369609 +0000 UTC m=+5491.842864186" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.175186 4861 scope.go:117] "RemoveContainer" containerID="bee7785ea40fd8f7500020db016176c31adcb0b468784f2898f784fc40391071" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.236528 4861 scope.go:117] "RemoveContainer" containerID="25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95" Jan 29 08:06:40 crc kubenswrapper[4861]: E0129 08:06:40.237735 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find 
container \"25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95\": container with ID starting with 25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95 not found: ID does not exist" containerID="25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.237810 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95"} err="failed to get container status \"25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95\": rpc error: code = NotFound desc = could not find container \"25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95\": container with ID starting with 25c8bb40aef68ee2ab146c2dfa967c61a0e4c29a4301ee831583d2b1056e1a95 not found: ID does not exist" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.237848 4861 scope.go:117] "RemoveContainer" containerID="0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a" Jan 29 08:06:40 crc kubenswrapper[4861]: E0129 08:06:40.239158 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a\": container with ID starting with 0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a not found: ID does not exist" containerID="0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.239246 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a"} err="failed to get container status \"0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a\": rpc error: code = NotFound desc = could not find container \"0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a\": container with ID starting with 0b6bc5048699370c8179ea7e7dfbd47c8709a7ec477a68afdb249bbcdb9d660a not found: ID does not exist" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.239297 4861 scope.go:117] "RemoveContainer" containerID="bee7785ea40fd8f7500020db016176c31adcb0b468784f2898f784fc40391071" Jan 29 08:06:40 crc kubenswrapper[4861]: E0129 08:06:40.239924 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bee7785ea40fd8f7500020db016176c31adcb0b468784f2898f784fc40391071\": container with ID starting with bee7785ea40fd8f7500020db016176c31adcb0b468784f2898f784fc40391071 not found: ID does not exist" containerID="bee7785ea40fd8f7500020db016176c31adcb0b468784f2898f784fc40391071" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.239976 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bee7785ea40fd8f7500020db016176c31adcb0b468784f2898f784fc40391071"} err="failed to get container status \"bee7785ea40fd8f7500020db016176c31adcb0b468784f2898f784fc40391071\": rpc error: code = NotFound desc = could not find container \"bee7785ea40fd8f7500020db016176c31adcb0b468784f2898f784fc40391071\": container with ID starting with bee7785ea40fd8f7500020db016176c31adcb0b468784f2898f784fc40391071 not found: ID does not exist" Jan 29 08:06:40 crc kubenswrapper[4861]: I0129 08:06:40.253268 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-67zds"] Jan 29 08:06:40 crc 
kubenswrapper[4861]: I0129 08:06:40.267063 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-67zds"] Jan 29 08:06:41 crc kubenswrapper[4861]: I0129 08:06:41.137362 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94cb18e9-8205-47ce-af00-713abf4bae34" path="/var/lib/kubelet/pods/94cb18e9-8205-47ce-af00-713abf4bae34/volumes" Jan 29 08:06:44 crc kubenswrapper[4861]: I0129 08:06:44.770793 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:45 crc kubenswrapper[4861]: I0129 08:06:45.752381 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:06:45 crc kubenswrapper[4861]: I0129 08:06:45.826715 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c5cdbc687-h2jsh"] Jan 29 08:06:45 crc kubenswrapper[4861]: I0129 08:06:45.827169 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh" podUID="013166d7-5f55-45cd-914e-f3200cd9c79a" containerName="dnsmasq-dns" containerID="cri-o://978ad967fa4f0bd4de7a5caa8b6028e4d4c369ab055cdbb2c3f9a9cd6048f762" gracePeriod=10 Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.195819 4861 generic.go:334] "Generic (PLEG): container finished" podID="013166d7-5f55-45cd-914e-f3200cd9c79a" containerID="978ad967fa4f0bd4de7a5caa8b6028e4d4c369ab055cdbb2c3f9a9cd6048f762" exitCode=0 Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.195933 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh" event={"ID":"013166d7-5f55-45cd-914e-f3200cd9c79a","Type":"ContainerDied","Data":"978ad967fa4f0bd4de7a5caa8b6028e4d4c369ab055cdbb2c3f9a9cd6048f762"} Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.469458 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6d596d8b5b-rdh75" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.528241 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-554866789b-pmw2c"] Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.528486 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-554866789b-pmw2c" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api-log" containerID="cri-o://d54eff99b639162790874a64f2642cdf1b049c0c04b2f96344a42234e3a4b1f3" gracePeriod=30 Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.528798 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-554866789b-pmw2c" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api" containerID="cri-o://62d007ca1f02d5ff71d0e481086eb43d47e48f093bc4190bc005189e46afece0" gracePeriod=30 Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.545482 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-554866789b-pmw2c" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.1.41:9311/healthcheck\": EOF" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.559700 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-554866789b-pmw2c" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api-log" probeResult="failure" output="Get 
\"http://10.217.1.41:9311/healthcheck\": EOF" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.564284 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-554866789b-pmw2c" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.1.41:9311/healthcheck\": EOF" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.564304 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-554866789b-pmw2c" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.1.41:9311/healthcheck\": EOF" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.596922 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.771726 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n76rs\" (UniqueName: \"kubernetes.io/projected/013166d7-5f55-45cd-914e-f3200cd9c79a-kube-api-access-n76rs\") pod \"013166d7-5f55-45cd-914e-f3200cd9c79a\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.772056 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-dns-svc\") pod \"013166d7-5f55-45cd-914e-f3200cd9c79a\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.772121 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-config\") pod \"013166d7-5f55-45cd-914e-f3200cd9c79a\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.772170 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-sb\") pod \"013166d7-5f55-45cd-914e-f3200cd9c79a\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.772209 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-nb\") pod \"013166d7-5f55-45cd-914e-f3200cd9c79a\" (UID: \"013166d7-5f55-45cd-914e-f3200cd9c79a\") " Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.783360 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/013166d7-5f55-45cd-914e-f3200cd9c79a-kube-api-access-n76rs" (OuterVolumeSpecName: "kube-api-access-n76rs") pod "013166d7-5f55-45cd-914e-f3200cd9c79a" (UID: "013166d7-5f55-45cd-914e-f3200cd9c79a"). InnerVolumeSpecName "kube-api-access-n76rs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.822739 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-config" (OuterVolumeSpecName: "config") pod "013166d7-5f55-45cd-914e-f3200cd9c79a" (UID: "013166d7-5f55-45cd-914e-f3200cd9c79a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.833620 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "013166d7-5f55-45cd-914e-f3200cd9c79a" (UID: "013166d7-5f55-45cd-914e-f3200cd9c79a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.839314 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "013166d7-5f55-45cd-914e-f3200cd9c79a" (UID: "013166d7-5f55-45cd-914e-f3200cd9c79a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.854605 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "013166d7-5f55-45cd-914e-f3200cd9c79a" (UID: "013166d7-5f55-45cd-914e-f3200cd9c79a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.874429 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n76rs\" (UniqueName: \"kubernetes.io/projected/013166d7-5f55-45cd-914e-f3200cd9c79a-kube-api-access-n76rs\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.874468 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.874478 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.874487 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:46 crc kubenswrapper[4861]: I0129 08:06:46.874496 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/013166d7-5f55-45cd-914e-f3200cd9c79a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:47 crc kubenswrapper[4861]: I0129 08:06:47.210424 4861 generic.go:334] "Generic (PLEG): container finished" podID="a1bf637d-8a99-466d-a782-c247e39e303e" containerID="d54eff99b639162790874a64f2642cdf1b049c0c04b2f96344a42234e3a4b1f3" exitCode=143 Jan 29 08:06:47 crc kubenswrapper[4861]: I0129 08:06:47.210507 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-554866789b-pmw2c" event={"ID":"a1bf637d-8a99-466d-a782-c247e39e303e","Type":"ContainerDied","Data":"d54eff99b639162790874a64f2642cdf1b049c0c04b2f96344a42234e3a4b1f3"} Jan 29 08:06:47 crc kubenswrapper[4861]: I0129 08:06:47.213284 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh" 
event={"ID":"013166d7-5f55-45cd-914e-f3200cd9c79a","Type":"ContainerDied","Data":"258afa92900d241681f57f2b6ed275a0160751a0ea65b82487f5e31e663168ef"} Jan 29 08:06:47 crc kubenswrapper[4861]: I0129 08:06:47.213356 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c5cdbc687-h2jsh" Jan 29 08:06:47 crc kubenswrapper[4861]: I0129 08:06:47.213470 4861 scope.go:117] "RemoveContainer" containerID="978ad967fa4f0bd4de7a5caa8b6028e4d4c369ab055cdbb2c3f9a9cd6048f762" Jan 29 08:06:47 crc kubenswrapper[4861]: I0129 08:06:47.245389 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c5cdbc687-h2jsh"] Jan 29 08:06:47 crc kubenswrapper[4861]: I0129 08:06:47.251794 4861 scope.go:117] "RemoveContainer" containerID="ac3e438dfcb634fce04956a8111f0a0091b6508d108a2e892c54a27a3f6c4fb6" Jan 29 08:06:47 crc kubenswrapper[4861]: I0129 08:06:47.263839 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c5cdbc687-h2jsh"] Jan 29 08:06:49 crc kubenswrapper[4861]: I0129 08:06:49.139898 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="013166d7-5f55-45cd-914e-f3200cd9c79a" path="/var/lib/kubelet/pods/013166d7-5f55-45cd-914e-f3200cd9c79a/volumes" Jan 29 08:06:51 crc kubenswrapper[4861]: I0129 08:06:51.606527 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-554866789b-pmw2c" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.1.41:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 08:06:51 crc kubenswrapper[4861]: I0129 08:06:51.976768 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-554866789b-pmw2c" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.1.41:9311/healthcheck\": read tcp 10.217.0.2:48698->10.217.1.41:9311: read: connection reset by peer" Jan 29 08:06:51 crc kubenswrapper[4861]: I0129 08:06:51.976827 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-554866789b-pmw2c" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.1.41:9311/healthcheck\": read tcp 10.217.0.2:48712->10.217.1.41:9311: read: connection reset by peer" Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.260262 4861 generic.go:334] "Generic (PLEG): container finished" podID="a1bf637d-8a99-466d-a782-c247e39e303e" containerID="62d007ca1f02d5ff71d0e481086eb43d47e48f093bc4190bc005189e46afece0" exitCode=0 Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.260511 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-554866789b-pmw2c" event={"ID":"a1bf637d-8a99-466d-a782-c247e39e303e","Type":"ContainerDied","Data":"62d007ca1f02d5ff71d0e481086eb43d47e48f093bc4190bc005189e46afece0"} Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.413273 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.579539 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data\") pod \"a1bf637d-8a99-466d-a782-c247e39e303e\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.579608 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1bf637d-8a99-466d-a782-c247e39e303e-logs\") pod \"a1bf637d-8a99-466d-a782-c247e39e303e\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.579633 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data-custom\") pod \"a1bf637d-8a99-466d-a782-c247e39e303e\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.579680 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trg5x\" (UniqueName: \"kubernetes.io/projected/a1bf637d-8a99-466d-a782-c247e39e303e-kube-api-access-trg5x\") pod \"a1bf637d-8a99-466d-a782-c247e39e303e\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.579766 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-combined-ca-bundle\") pod \"a1bf637d-8a99-466d-a782-c247e39e303e\" (UID: \"a1bf637d-8a99-466d-a782-c247e39e303e\") " Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.580603 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1bf637d-8a99-466d-a782-c247e39e303e-logs" (OuterVolumeSpecName: "logs") pod "a1bf637d-8a99-466d-a782-c247e39e303e" (UID: "a1bf637d-8a99-466d-a782-c247e39e303e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.585156 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1bf637d-8a99-466d-a782-c247e39e303e-kube-api-access-trg5x" (OuterVolumeSpecName: "kube-api-access-trg5x") pod "a1bf637d-8a99-466d-a782-c247e39e303e" (UID: "a1bf637d-8a99-466d-a782-c247e39e303e"). InnerVolumeSpecName "kube-api-access-trg5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.585418 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a1bf637d-8a99-466d-a782-c247e39e303e" (UID: "a1bf637d-8a99-466d-a782-c247e39e303e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.613308 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a1bf637d-8a99-466d-a782-c247e39e303e" (UID: "a1bf637d-8a99-466d-a782-c247e39e303e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.624431 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data" (OuterVolumeSpecName: "config-data") pod "a1bf637d-8a99-466d-a782-c247e39e303e" (UID: "a1bf637d-8a99-466d-a782-c247e39e303e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.681885 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.682240 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1bf637d-8a99-466d-a782-c247e39e303e-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.682334 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.682378 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trg5x\" (UniqueName: \"kubernetes.io/projected/a1bf637d-8a99-466d-a782-c247e39e303e-kube-api-access-trg5x\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:52 crc kubenswrapper[4861]: I0129 08:06:52.682387 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1bf637d-8a99-466d-a782-c247e39e303e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:06:53 crc kubenswrapper[4861]: I0129 08:06:53.270835 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-554866789b-pmw2c" event={"ID":"a1bf637d-8a99-466d-a782-c247e39e303e","Type":"ContainerDied","Data":"c8df9907241d83a46601f1aaf60ac0905e8b2aa3a09ecda3dc613eea27b95ab1"} Jan 29 08:06:53 crc kubenswrapper[4861]: I0129 08:06:53.270904 4861 scope.go:117] "RemoveContainer" containerID="62d007ca1f02d5ff71d0e481086eb43d47e48f093bc4190bc005189e46afece0" Jan 29 08:06:53 crc kubenswrapper[4861]: I0129 08:06:53.270975 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-554866789b-pmw2c" Jan 29 08:06:53 crc kubenswrapper[4861]: I0129 08:06:53.310714 4861 scope.go:117] "RemoveContainer" containerID="d54eff99b639162790874a64f2642cdf1b049c0c04b2f96344a42234e3a4b1f3" Jan 29 08:06:53 crc kubenswrapper[4861]: I0129 08:06:53.312947 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-554866789b-pmw2c"] Jan 29 08:06:53 crc kubenswrapper[4861]: I0129 08:06:53.344702 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-554866789b-pmw2c"] Jan 29 08:06:55 crc kubenswrapper[4861]: I0129 08:06:55.135319 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" path="/var/lib/kubelet/pods/a1bf637d-8a99-466d-a782-c247e39e303e/volumes" Jan 29 08:07:00 crc kubenswrapper[4861]: I0129 08:07:00.630611 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:07:00 crc kubenswrapper[4861]: I0129 08:07:00.631221 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:07:00 crc kubenswrapper[4861]: I0129 08:07:00.631265 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 08:07:00 crc kubenswrapper[4861]: I0129 08:07:00.631952 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f12c7cd3a0871c191ad7ec4bd142001b746849f696737470eed8fe923ec11fff"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 08:07:00 crc kubenswrapper[4861]: I0129 08:07:00.632003 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://f12c7cd3a0871c191ad7ec4bd142001b746849f696737470eed8fe923ec11fff" gracePeriod=600 Jan 29 08:07:01 crc kubenswrapper[4861]: I0129 08:07:01.356833 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="f12c7cd3a0871c191ad7ec4bd142001b746849f696737470eed8fe923ec11fff" exitCode=0 Jan 29 08:07:01 crc kubenswrapper[4861]: I0129 08:07:01.356912 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"f12c7cd3a0871c191ad7ec4bd142001b746849f696737470eed8fe923ec11fff"} Jan 29 08:07:01 crc kubenswrapper[4861]: I0129 08:07:01.357575 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa"} Jan 29 
08:07:01 crc kubenswrapper[4861]: I0129 08:07:01.357597 4861 scope.go:117] "RemoveContainer" containerID="3ac352b237a2402bfda9523c5ea28431a0e5c3c161ac27cb56cfc8394a9b774e" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.655188 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-nq84p"] Jan 29 08:07:21 crc kubenswrapper[4861]: E0129 08:07:21.656296 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.656321 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api" Jan 29 08:07:21 crc kubenswrapper[4861]: E0129 08:07:21.656349 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94cb18e9-8205-47ce-af00-713abf4bae34" containerName="registry-server" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.656363 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="94cb18e9-8205-47ce-af00-713abf4bae34" containerName="registry-server" Jan 29 08:07:21 crc kubenswrapper[4861]: E0129 08:07:21.656388 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94cb18e9-8205-47ce-af00-713abf4bae34" containerName="extract-content" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.656401 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="94cb18e9-8205-47ce-af00-713abf4bae34" containerName="extract-content" Jan 29 08:07:21 crc kubenswrapper[4861]: E0129 08:07:21.656423 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api-log" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.656435 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api-log" Jan 29 08:07:21 crc kubenswrapper[4861]: E0129 08:07:21.656453 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="013166d7-5f55-45cd-914e-f3200cd9c79a" containerName="dnsmasq-dns" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.656465 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="013166d7-5f55-45cd-914e-f3200cd9c79a" containerName="dnsmasq-dns" Jan 29 08:07:21 crc kubenswrapper[4861]: E0129 08:07:21.656490 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94cb18e9-8205-47ce-af00-713abf4bae34" containerName="extract-utilities" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.656504 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="94cb18e9-8205-47ce-af00-713abf4bae34" containerName="extract-utilities" Jan 29 08:07:21 crc kubenswrapper[4861]: E0129 08:07:21.656525 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="013166d7-5f55-45cd-914e-f3200cd9c79a" containerName="init" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.656537 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="013166d7-5f55-45cd-914e-f3200cd9c79a" containerName="init" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.656819 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api-log" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.656846 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1bf637d-8a99-466d-a782-c247e39e303e" containerName="barbican-api" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.656869 4861 
memory_manager.go:354] "RemoveStaleState removing state" podUID="94cb18e9-8205-47ce-af00-713abf4bae34" containerName="registry-server" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.656889 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="013166d7-5f55-45cd-914e-f3200cd9c79a" containerName="dnsmasq-dns" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.657800 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-nq84p" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.676664 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-nq84p"] Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.767801 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-84ef-account-create-update-ngb6s"] Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.769459 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-84ef-account-create-update-ngb6s" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.772755 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.780545 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-84ef-account-create-update-ngb6s"] Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.790126 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvqgt\" (UniqueName: \"kubernetes.io/projected/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-kube-api-access-kvqgt\") pod \"neutron-db-create-nq84p\" (UID: \"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c\") " pod="openstack/neutron-db-create-nq84p" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.790826 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-operator-scripts\") pod \"neutron-db-create-nq84p\" (UID: \"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c\") " pod="openstack/neutron-db-create-nq84p" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.893416 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-operator-scripts\") pod \"neutron-db-create-nq84p\" (UID: \"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c\") " pod="openstack/neutron-db-create-nq84p" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.893526 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kq2xb\" (UniqueName: \"kubernetes.io/projected/bee00f6c-5570-42f4-9594-543e911751fe-kube-api-access-kq2xb\") pod \"neutron-84ef-account-create-update-ngb6s\" (UID: \"bee00f6c-5570-42f4-9594-543e911751fe\") " pod="openstack/neutron-84ef-account-create-update-ngb6s" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.896171 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-operator-scripts\") pod \"neutron-db-create-nq84p\" (UID: \"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c\") " pod="openstack/neutron-db-create-nq84p" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.896322 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-kvqgt\" (UniqueName: \"kubernetes.io/projected/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-kube-api-access-kvqgt\") pod \"neutron-db-create-nq84p\" (UID: \"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c\") " pod="openstack/neutron-db-create-nq84p" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.896401 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bee00f6c-5570-42f4-9594-543e911751fe-operator-scripts\") pod \"neutron-84ef-account-create-update-ngb6s\" (UID: \"bee00f6c-5570-42f4-9594-543e911751fe\") " pod="openstack/neutron-84ef-account-create-update-ngb6s" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.925015 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvqgt\" (UniqueName: \"kubernetes.io/projected/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-kube-api-access-kvqgt\") pod \"neutron-db-create-nq84p\" (UID: \"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c\") " pod="openstack/neutron-db-create-nq84p" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.989189 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-nq84p" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.998684 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bee00f6c-5570-42f4-9594-543e911751fe-operator-scripts\") pod \"neutron-84ef-account-create-update-ngb6s\" (UID: \"bee00f6c-5570-42f4-9594-543e911751fe\") " pod="openstack/neutron-84ef-account-create-update-ngb6s" Jan 29 08:07:21 crc kubenswrapper[4861]: I0129 08:07:21.998832 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kq2xb\" (UniqueName: \"kubernetes.io/projected/bee00f6c-5570-42f4-9594-543e911751fe-kube-api-access-kq2xb\") pod \"neutron-84ef-account-create-update-ngb6s\" (UID: \"bee00f6c-5570-42f4-9594-543e911751fe\") " pod="openstack/neutron-84ef-account-create-update-ngb6s" Jan 29 08:07:22 crc kubenswrapper[4861]: I0129 08:07:22.000043 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bee00f6c-5570-42f4-9594-543e911751fe-operator-scripts\") pod \"neutron-84ef-account-create-update-ngb6s\" (UID: \"bee00f6c-5570-42f4-9594-543e911751fe\") " pod="openstack/neutron-84ef-account-create-update-ngb6s" Jan 29 08:07:22 crc kubenswrapper[4861]: I0129 08:07:22.017190 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kq2xb\" (UniqueName: \"kubernetes.io/projected/bee00f6c-5570-42f4-9594-543e911751fe-kube-api-access-kq2xb\") pod \"neutron-84ef-account-create-update-ngb6s\" (UID: \"bee00f6c-5570-42f4-9594-543e911751fe\") " pod="openstack/neutron-84ef-account-create-update-ngb6s" Jan 29 08:07:22 crc kubenswrapper[4861]: I0129 08:07:22.091485 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-84ef-account-create-update-ngb6s" Jan 29 08:07:22 crc kubenswrapper[4861]: I0129 08:07:22.529556 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-nq84p"] Jan 29 08:07:22 crc kubenswrapper[4861]: W0129 08:07:22.609692 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbee00f6c_5570_42f4_9594_543e911751fe.slice/crio-73dbd8abd885609b1e7d7aab222cb314c8d653c4199cfee343d75436ca4a6ade WatchSource:0}: Error finding container 73dbd8abd885609b1e7d7aab222cb314c8d653c4199cfee343d75436ca4a6ade: Status 404 returned error can't find the container with id 73dbd8abd885609b1e7d7aab222cb314c8d653c4199cfee343d75436ca4a6ade Jan 29 08:07:22 crc kubenswrapper[4861]: I0129 08:07:22.610679 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-84ef-account-create-update-ngb6s"] Jan 29 08:07:22 crc kubenswrapper[4861]: I0129 08:07:22.611810 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-nq84p" event={"ID":"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c","Type":"ContainerStarted","Data":"336f13213db52d077d74c7c549d5ba2a551b249db7bade176afddb2ba0a1c9a3"} Jan 29 08:07:23 crc kubenswrapper[4861]: I0129 08:07:23.627439 4861 generic.go:334] "Generic (PLEG): container finished" podID="bee00f6c-5570-42f4-9594-543e911751fe" containerID="97c71a1a99e929acc028573e1bffd3753005ca278e2887b379a03d0ebc25e18a" exitCode=0 Jan 29 08:07:23 crc kubenswrapper[4861]: I0129 08:07:23.627552 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-84ef-account-create-update-ngb6s" event={"ID":"bee00f6c-5570-42f4-9594-543e911751fe","Type":"ContainerDied","Data":"97c71a1a99e929acc028573e1bffd3753005ca278e2887b379a03d0ebc25e18a"} Jan 29 08:07:23 crc kubenswrapper[4861]: I0129 08:07:23.627973 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-84ef-account-create-update-ngb6s" event={"ID":"bee00f6c-5570-42f4-9594-543e911751fe","Type":"ContainerStarted","Data":"73dbd8abd885609b1e7d7aab222cb314c8d653c4199cfee343d75436ca4a6ade"} Jan 29 08:07:23 crc kubenswrapper[4861]: I0129 08:07:23.630929 4861 generic.go:334] "Generic (PLEG): container finished" podID="d76ada8d-3b62-4dc2-94f9-af552c0e4e1c" containerID="88b1ca20977e144895268125e1f641464b96086df8bc54bfccd8ea407dbd2ed5" exitCode=0 Jan 29 08:07:23 crc kubenswrapper[4861]: I0129 08:07:23.630998 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-nq84p" event={"ID":"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c","Type":"ContainerDied","Data":"88b1ca20977e144895268125e1f641464b96086df8bc54bfccd8ea407dbd2ed5"} Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.073414 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-84ef-account-create-update-ngb6s" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.078528 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-nq84p" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.176199 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bee00f6c-5570-42f4-9594-543e911751fe-operator-scripts\") pod \"bee00f6c-5570-42f4-9594-543e911751fe\" (UID: \"bee00f6c-5570-42f4-9594-543e911751fe\") " Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.176374 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvqgt\" (UniqueName: \"kubernetes.io/projected/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-kube-api-access-kvqgt\") pod \"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c\" (UID: \"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c\") " Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.176581 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kq2xb\" (UniqueName: \"kubernetes.io/projected/bee00f6c-5570-42f4-9594-543e911751fe-kube-api-access-kq2xb\") pod \"bee00f6c-5570-42f4-9594-543e911751fe\" (UID: \"bee00f6c-5570-42f4-9594-543e911751fe\") " Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.176629 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-operator-scripts\") pod \"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c\" (UID: \"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c\") " Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.179760 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bee00f6c-5570-42f4-9594-543e911751fe-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bee00f6c-5570-42f4-9594-543e911751fe" (UID: "bee00f6c-5570-42f4-9594-543e911751fe"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.182116 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d76ada8d-3b62-4dc2-94f9-af552c0e4e1c" (UID: "d76ada8d-3b62-4dc2-94f9-af552c0e4e1c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.187343 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-kube-api-access-kvqgt" (OuterVolumeSpecName: "kube-api-access-kvqgt") pod "d76ada8d-3b62-4dc2-94f9-af552c0e4e1c" (UID: "d76ada8d-3b62-4dc2-94f9-af552c0e4e1c"). InnerVolumeSpecName "kube-api-access-kvqgt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.189899 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bee00f6c-5570-42f4-9594-543e911751fe-kube-api-access-kq2xb" (OuterVolumeSpecName: "kube-api-access-kq2xb") pod "bee00f6c-5570-42f4-9594-543e911751fe" (UID: "bee00f6c-5570-42f4-9594-543e911751fe"). InnerVolumeSpecName "kube-api-access-kq2xb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.280388 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bee00f6c-5570-42f4-9594-543e911751fe-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.280466 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvqgt\" (UniqueName: \"kubernetes.io/projected/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-kube-api-access-kvqgt\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.280495 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kq2xb\" (UniqueName: \"kubernetes.io/projected/bee00f6c-5570-42f4-9594-543e911751fe-kube-api-access-kq2xb\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.280525 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.654401 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-84ef-account-create-update-ngb6s" event={"ID":"bee00f6c-5570-42f4-9594-543e911751fe","Type":"ContainerDied","Data":"73dbd8abd885609b1e7d7aab222cb314c8d653c4199cfee343d75436ca4a6ade"} Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.654729 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73dbd8abd885609b1e7d7aab222cb314c8d653c4199cfee343d75436ca4a6ade" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.654424 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-84ef-account-create-update-ngb6s" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.655802 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-nq84p" event={"ID":"d76ada8d-3b62-4dc2-94f9-af552c0e4e1c","Type":"ContainerDied","Data":"336f13213db52d077d74c7c549d5ba2a551b249db7bade176afddb2ba0a1c9a3"} Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.655839 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="336f13213db52d077d74c7c549d5ba2a551b249db7bade176afddb2ba0a1c9a3" Jan 29 08:07:25 crc kubenswrapper[4861]: I0129 08:07:25.655940 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-nq84p" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.157018 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-q7c5w"] Jan 29 08:07:27 crc kubenswrapper[4861]: E0129 08:07:27.157618 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bee00f6c-5570-42f4-9594-543e911751fe" containerName="mariadb-account-create-update" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.157636 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="bee00f6c-5570-42f4-9594-543e911751fe" containerName="mariadb-account-create-update" Jan 29 08:07:27 crc kubenswrapper[4861]: E0129 08:07:27.157654 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d76ada8d-3b62-4dc2-94f9-af552c0e4e1c" containerName="mariadb-database-create" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.157661 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d76ada8d-3b62-4dc2-94f9-af552c0e4e1c" containerName="mariadb-database-create" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.157843 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d76ada8d-3b62-4dc2-94f9-af552c0e4e1c" containerName="mariadb-database-create" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.157869 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="bee00f6c-5570-42f4-9594-543e911751fe" containerName="mariadb-account-create-update" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.158570 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.161230 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.161549 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.161944 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-bqxm6" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.172205 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-q7c5w"] Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.227511 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-combined-ca-bundle\") pod \"neutron-db-sync-q7c5w\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.227965 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-config\") pod \"neutron-db-sync-q7c5w\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.229662 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4299w\" (UniqueName: \"kubernetes.io/projected/83194fd3-8714-4812-99c5-5e3870600345-kube-api-access-4299w\") pod \"neutron-db-sync-q7c5w\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 
08:07:27.331959 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4299w\" (UniqueName: \"kubernetes.io/projected/83194fd3-8714-4812-99c5-5e3870600345-kube-api-access-4299w\") pod \"neutron-db-sync-q7c5w\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.332089 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-combined-ca-bundle\") pod \"neutron-db-sync-q7c5w\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.332129 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-config\") pod \"neutron-db-sync-q7c5w\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.338925 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-config\") pod \"neutron-db-sync-q7c5w\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.341562 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-combined-ca-bundle\") pod \"neutron-db-sync-q7c5w\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.352524 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4299w\" (UniqueName: \"kubernetes.io/projected/83194fd3-8714-4812-99c5-5e3870600345-kube-api-access-4299w\") pod \"neutron-db-sync-q7c5w\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.492388 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:27 crc kubenswrapper[4861]: I0129 08:07:27.858475 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-q7c5w"] Jan 29 08:07:28 crc kubenswrapper[4861]: I0129 08:07:28.707068 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-q7c5w" event={"ID":"83194fd3-8714-4812-99c5-5e3870600345","Type":"ContainerStarted","Data":"9edbd5bdbffa3d969057cc493e8c0acc3077ea2208cbb563b179e3c324002a86"} Jan 29 08:07:28 crc kubenswrapper[4861]: I0129 08:07:28.707599 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-q7c5w" event={"ID":"83194fd3-8714-4812-99c5-5e3870600345","Type":"ContainerStarted","Data":"afcdd51e7b672dc1704ac9424f3da5a8604ccef25cabab282daf09975d17f0b4"} Jan 29 08:07:28 crc kubenswrapper[4861]: I0129 08:07:28.731897 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-q7c5w" podStartSLOduration=1.731875305 podStartE2EDuration="1.731875305s" podCreationTimestamp="2026-01-29 08:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:07:28.728199339 +0000 UTC m=+5540.399693936" watchObservedRunningTime="2026-01-29 08:07:28.731875305 +0000 UTC m=+5540.403369862" Jan 29 08:07:30 crc kubenswrapper[4861]: I0129 08:07:30.080817 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-94df6"] Jan 29 08:07:30 crc kubenswrapper[4861]: I0129 08:07:30.089522 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-94df6"] Jan 29 08:07:31 crc kubenswrapper[4861]: I0129 08:07:31.131725 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4862baf6-8fde-4e47-9667-0193440f9c36" path="/var/lib/kubelet/pods/4862baf6-8fde-4e47-9667-0193440f9c36/volumes" Jan 29 08:07:32 crc kubenswrapper[4861]: I0129 08:07:32.778691 4861 generic.go:334] "Generic (PLEG): container finished" podID="83194fd3-8714-4812-99c5-5e3870600345" containerID="9edbd5bdbffa3d969057cc493e8c0acc3077ea2208cbb563b179e3c324002a86" exitCode=0 Jan 29 08:07:32 crc kubenswrapper[4861]: I0129 08:07:32.778870 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-q7c5w" event={"ID":"83194fd3-8714-4812-99c5-5e3870600345","Type":"ContainerDied","Data":"9edbd5bdbffa3d969057cc493e8c0acc3077ea2208cbb563b179e3c324002a86"} Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.114887 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.165972 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4299w\" (UniqueName: \"kubernetes.io/projected/83194fd3-8714-4812-99c5-5e3870600345-kube-api-access-4299w\") pod \"83194fd3-8714-4812-99c5-5e3870600345\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.166383 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-config\") pod \"83194fd3-8714-4812-99c5-5e3870600345\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.166724 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-combined-ca-bundle\") pod \"83194fd3-8714-4812-99c5-5e3870600345\" (UID: \"83194fd3-8714-4812-99c5-5e3870600345\") " Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.171383 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83194fd3-8714-4812-99c5-5e3870600345-kube-api-access-4299w" (OuterVolumeSpecName: "kube-api-access-4299w") pod "83194fd3-8714-4812-99c5-5e3870600345" (UID: "83194fd3-8714-4812-99c5-5e3870600345"). InnerVolumeSpecName "kube-api-access-4299w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.189216 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83194fd3-8714-4812-99c5-5e3870600345" (UID: "83194fd3-8714-4812-99c5-5e3870600345"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.195441 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-config" (OuterVolumeSpecName: "config") pod "83194fd3-8714-4812-99c5-5e3870600345" (UID: "83194fd3-8714-4812-99c5-5e3870600345"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.269939 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.269985 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4299w\" (UniqueName: \"kubernetes.io/projected/83194fd3-8714-4812-99c5-5e3870600345-kube-api-access-4299w\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.270010 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/83194fd3-8714-4812-99c5-5e3870600345-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.815997 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-q7c5w" event={"ID":"83194fd3-8714-4812-99c5-5e3870600345","Type":"ContainerDied","Data":"afcdd51e7b672dc1704ac9424f3da5a8604ccef25cabab282daf09975d17f0b4"} Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.816371 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afcdd51e7b672dc1704ac9424f3da5a8604ccef25cabab282daf09975d17f0b4" Jan 29 08:07:34 crc kubenswrapper[4861]: I0129 08:07:34.816104 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-q7c5w" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.030232 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-879b6795f-7qc46"] Jan 29 08:07:35 crc kubenswrapper[4861]: E0129 08:07:35.030632 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83194fd3-8714-4812-99c5-5e3870600345" containerName="neutron-db-sync" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.030646 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="83194fd3-8714-4812-99c5-5e3870600345" containerName="neutron-db-sync" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.030848 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="83194fd3-8714-4812-99c5-5e3870600345" containerName="neutron-db-sync" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.037863 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.054214 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-879b6795f-7qc46"] Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.128395 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-869bc88768-7zjks"] Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.129706 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.132875 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.133290 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-bqxm6" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.133426 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.133602 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.134343 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-869bc88768-7zjks"] Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.193423 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-config\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.193486 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-sb\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.193524 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cn65l\" (UniqueName: \"kubernetes.io/projected/d5f913e4-be24-42aa-8c80-bb119bf869c9-kube-api-access-cn65l\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.193541 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-nb\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.193626 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-dns-svc\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.294678 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-config\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.295022 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-config\") pod 
\"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.295166 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-combined-ca-bundle\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.295289 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-sb\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.295352 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-httpd-config\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.295423 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-nb\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.295442 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cn65l\" (UniqueName: \"kubernetes.io/projected/d5f913e4-be24-42aa-8c80-bb119bf869c9-kube-api-access-cn65l\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.295530 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-ovndb-tls-certs\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.295592 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lffvv\" (UniqueName: \"kubernetes.io/projected/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-kube-api-access-lffvv\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.295750 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-dns-svc\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.295905 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-config\") pod \"dnsmasq-dns-879b6795f-7qc46\" 
(UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.296053 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-nb\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.296296 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-sb\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.296481 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-dns-svc\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.314968 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cn65l\" (UniqueName: \"kubernetes.io/projected/d5f913e4-be24-42aa-8c80-bb119bf869c9-kube-api-access-cn65l\") pod \"dnsmasq-dns-879b6795f-7qc46\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.360060 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.396957 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-httpd-config\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.397315 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-ovndb-tls-certs\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.397447 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lffvv\" (UniqueName: \"kubernetes.io/projected/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-kube-api-access-lffvv\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.397650 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-config\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.397792 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-combined-ca-bundle\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.400976 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-combined-ca-bundle\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.401362 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-ovndb-tls-certs\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.401853 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-config\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.402501 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-httpd-config\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.425130 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lffvv\" (UniqueName: \"kubernetes.io/projected/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-kube-api-access-lffvv\") pod \"neutron-869bc88768-7zjks\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.454698 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.807657 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-879b6795f-7qc46"] Jan 29 08:07:35 crc kubenswrapper[4861]: I0129 08:07:35.828810 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-879b6795f-7qc46" event={"ID":"d5f913e4-be24-42aa-8c80-bb119bf869c9","Type":"ContainerStarted","Data":"bb7f29b6f82757e41e3f88330725403ec2103d71fdd70da3078da1c1cc1d24ad"} Jan 29 08:07:36 crc kubenswrapper[4861]: I0129 08:07:36.016287 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-869bc88768-7zjks"] Jan 29 08:07:36 crc kubenswrapper[4861]: I0129 08:07:36.839108 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-869bc88768-7zjks" event={"ID":"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292","Type":"ContainerStarted","Data":"5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3"} Jan 29 08:07:36 crc kubenswrapper[4861]: I0129 08:07:36.839841 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-869bc88768-7zjks" event={"ID":"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292","Type":"ContainerStarted","Data":"27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233"} Jan 29 08:07:36 crc kubenswrapper[4861]: I0129 08:07:36.839859 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-869bc88768-7zjks" event={"ID":"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292","Type":"ContainerStarted","Data":"b864327c9792a8f9ca09d21df48d454e093bc40067e6a94691abe0d5598f85a9"} Jan 29 08:07:36 crc kubenswrapper[4861]: I0129 08:07:36.841056 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:07:36 crc kubenswrapper[4861]: I0129 08:07:36.843223 4861 generic.go:334] "Generic (PLEG): container finished" podID="d5f913e4-be24-42aa-8c80-bb119bf869c9" containerID="3d032f45a0c54505c4a26f7cb5fa2403bb0037f4eb70b2843b51e351ed26e548" exitCode=0 Jan 29 08:07:36 crc kubenswrapper[4861]: I0129 08:07:36.843253 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-879b6795f-7qc46" event={"ID":"d5f913e4-be24-42aa-8c80-bb119bf869c9","Type":"ContainerDied","Data":"3d032f45a0c54505c4a26f7cb5fa2403bb0037f4eb70b2843b51e351ed26e548"} Jan 29 08:07:36 crc kubenswrapper[4861]: I0129 08:07:36.872809 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-869bc88768-7zjks" podStartSLOduration=1.872789185 podStartE2EDuration="1.872789185s" podCreationTimestamp="2026-01-29 08:07:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:07:36.872278121 +0000 UTC m=+5548.543772688" watchObservedRunningTime="2026-01-29 08:07:36.872789185 +0000 UTC m=+5548.544283752" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.547395 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7dc5d457f5-tv7x9"] Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.549457 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.552308 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.552442 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.565050 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7dc5d457f5-tv7x9"] Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.744631 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gqm5\" (UniqueName: \"kubernetes.io/projected/758c49ec-0604-450b-8d71-6a01e3993cb6-kube-api-access-2gqm5\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.744678 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-config\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.744862 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-internal-tls-certs\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.745052 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-public-tls-certs\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.745108 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-ovndb-tls-certs\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.745223 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-combined-ca-bundle\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.745426 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-httpd-config\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.847641 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-httpd-config\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.847692 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gqm5\" (UniqueName: \"kubernetes.io/projected/758c49ec-0604-450b-8d71-6a01e3993cb6-kube-api-access-2gqm5\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.847711 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-config\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.847744 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-internal-tls-certs\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.847792 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-public-tls-certs\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.847811 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-ovndb-tls-certs\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.847844 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-combined-ca-bundle\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.854322 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-combined-ca-bundle\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.854361 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-ovndb-tls-certs\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.854376 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-config\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " 
pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.854504 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-public-tls-certs\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.858522 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-httpd-config\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.862780 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/758c49ec-0604-450b-8d71-6a01e3993cb6-internal-tls-certs\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.878143 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gqm5\" (UniqueName: \"kubernetes.io/projected/758c49ec-0604-450b-8d71-6a01e3993cb6-kube-api-access-2gqm5\") pod \"neutron-7dc5d457f5-tv7x9\" (UID: \"758c49ec-0604-450b-8d71-6a01e3993cb6\") " pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.879834 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-879b6795f-7qc46" event={"ID":"d5f913e4-be24-42aa-8c80-bb119bf869c9","Type":"ContainerStarted","Data":"e151c214cb39f7bfdabefd498bf45542d4eb81e943f876a6fd0b076a2a6dd94a"} Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.879880 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.897991 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-879b6795f-7qc46" podStartSLOduration=2.897973526 podStartE2EDuration="2.897973526s" podCreationTimestamp="2026-01-29 08:07:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:07:37.896125238 +0000 UTC m=+5549.567619805" watchObservedRunningTime="2026-01-29 08:07:37.897973526 +0000 UTC m=+5549.569468083" Jan 29 08:07:37 crc kubenswrapper[4861]: I0129 08:07:37.901703 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:38 crc kubenswrapper[4861]: I0129 08:07:38.489618 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7dc5d457f5-tv7x9"] Jan 29 08:07:38 crc kubenswrapper[4861]: W0129 08:07:38.497445 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod758c49ec_0604_450b_8d71_6a01e3993cb6.slice/crio-3a5d2f409ba6c02c7b5d9182ce3f810a92efdf38103c7f769359c45705a4b76d WatchSource:0}: Error finding container 3a5d2f409ba6c02c7b5d9182ce3f810a92efdf38103c7f769359c45705a4b76d: Status 404 returned error can't find the container with id 3a5d2f409ba6c02c7b5d9182ce3f810a92efdf38103c7f769359c45705a4b76d Jan 29 08:07:38 crc kubenswrapper[4861]: I0129 08:07:38.889454 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7dc5d457f5-tv7x9" event={"ID":"758c49ec-0604-450b-8d71-6a01e3993cb6","Type":"ContainerStarted","Data":"8217ef8049b23f58e59a6e8472a74af45a7564cb5f15ed02d3ae2c21dfc0c6d8"} Jan 29 08:07:38 crc kubenswrapper[4861]: I0129 08:07:38.889882 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7dc5d457f5-tv7x9" event={"ID":"758c49ec-0604-450b-8d71-6a01e3993cb6","Type":"ContainerStarted","Data":"580343a6fb273ce6a0baee47991b0daa948da56b879ccbededa01cbdd063c7b8"} Jan 29 08:07:38 crc kubenswrapper[4861]: I0129 08:07:38.889900 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7dc5d457f5-tv7x9" event={"ID":"758c49ec-0604-450b-8d71-6a01e3993cb6","Type":"ContainerStarted","Data":"3a5d2f409ba6c02c7b5d9182ce3f810a92efdf38103c7f769359c45705a4b76d"} Jan 29 08:07:38 crc kubenswrapper[4861]: I0129 08:07:38.911527 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7dc5d457f5-tv7x9" podStartSLOduration=1.911503483 podStartE2EDuration="1.911503483s" podCreationTimestamp="2026-01-29 08:07:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:07:38.905433014 +0000 UTC m=+5550.576927571" watchObservedRunningTime="2026-01-29 08:07:38.911503483 +0000 UTC m=+5550.582998040" Jan 29 08:07:39 crc kubenswrapper[4861]: I0129 08:07:39.898409 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:07:45 crc kubenswrapper[4861]: I0129 08:07:45.362324 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:07:45 crc kubenswrapper[4861]: I0129 08:07:45.427288 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fcd9d5db5-mx8h4"] Jan 29 08:07:45 crc kubenswrapper[4861]: I0129 08:07:45.427538 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" podUID="f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" containerName="dnsmasq-dns" containerID="cri-o://bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1" gracePeriod=10 Jan 29 08:07:45 crc kubenswrapper[4861]: I0129 08:07:45.905043 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:07:45 crc kubenswrapper[4861]: I0129 08:07:45.950908 4861 generic.go:334] "Generic (PLEG): container finished" podID="f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" containerID="bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1" exitCode=0 Jan 29 08:07:45 crc kubenswrapper[4861]: I0129 08:07:45.950958 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" event={"ID":"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c","Type":"ContainerDied","Data":"bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1"} Jan 29 08:07:45 crc kubenswrapper[4861]: I0129 08:07:45.950991 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" event={"ID":"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c","Type":"ContainerDied","Data":"c88ff5efc93252de47eb84b989273293e526b169ff434067d3e0d374f2532751"} Jan 29 08:07:45 crc kubenswrapper[4861]: I0129 08:07:45.951012 4861 scope.go:117] "RemoveContainer" containerID="bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1" Jan 29 08:07:45 crc kubenswrapper[4861]: I0129 08:07:45.951155 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" Jan 29 08:07:45 crc kubenswrapper[4861]: I0129 08:07:45.981791 4861 scope.go:117] "RemoveContainer" containerID="cba13597c8b4298714022ec7580caa0e33d70f8c2da15efb2e8975eb026a4537" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.008529 4861 scope.go:117] "RemoveContainer" containerID="bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1" Jan 29 08:07:46 crc kubenswrapper[4861]: E0129 08:07:46.009231 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1\": container with ID starting with bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1 not found: ID does not exist" containerID="bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.009280 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1"} err="failed to get container status \"bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1\": rpc error: code = NotFound desc = could not find container \"bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1\": container with ID starting with bce1ddc38daaf03e0a55750f0381cb1d56a262c2b22516b6fb528af6e8c460a1 not found: ID does not exist" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.009327 4861 scope.go:117] "RemoveContainer" containerID="cba13597c8b4298714022ec7580caa0e33d70f8c2da15efb2e8975eb026a4537" Jan 29 08:07:46 crc kubenswrapper[4861]: E0129 08:07:46.009833 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cba13597c8b4298714022ec7580caa0e33d70f8c2da15efb2e8975eb026a4537\": container with ID starting with cba13597c8b4298714022ec7580caa0e33d70f8c2da15efb2e8975eb026a4537 not found: ID does not exist" containerID="cba13597c8b4298714022ec7580caa0e33d70f8c2da15efb2e8975eb026a4537" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.009865 4861 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"cba13597c8b4298714022ec7580caa0e33d70f8c2da15efb2e8975eb026a4537"} err="failed to get container status \"cba13597c8b4298714022ec7580caa0e33d70f8c2da15efb2e8975eb026a4537\": rpc error: code = NotFound desc = could not find container \"cba13597c8b4298714022ec7580caa0e33d70f8c2da15efb2e8975eb026a4537\": container with ID starting with cba13597c8b4298714022ec7580caa0e33d70f8c2da15efb2e8975eb026a4537 not found: ID does not exist" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.103015 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-nb\") pod \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.103138 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-dns-svc\") pod \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.103162 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rrq5\" (UniqueName: \"kubernetes.io/projected/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-kube-api-access-8rrq5\") pod \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.103317 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-config\") pod \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.103355 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-sb\") pod \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\" (UID: \"f058f08c-37a6-4ffe-aa06-6f1fa922aa1c\") " Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.130953 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-kube-api-access-8rrq5" (OuterVolumeSpecName: "kube-api-access-8rrq5") pod "f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" (UID: "f058f08c-37a6-4ffe-aa06-6f1fa922aa1c"). InnerVolumeSpecName "kube-api-access-8rrq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.143991 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" (UID: "f058f08c-37a6-4ffe-aa06-6f1fa922aa1c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.159914 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" (UID: "f058f08c-37a6-4ffe-aa06-6f1fa922aa1c"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.176380 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" (UID: "f058f08c-37a6-4ffe-aa06-6f1fa922aa1c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.180542 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-config" (OuterVolumeSpecName: "config") pod "f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" (UID: "f058f08c-37a6-4ffe-aa06-6f1fa922aa1c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.204920 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.204954 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.204966 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.204977 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.204987 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rrq5\" (UniqueName: \"kubernetes.io/projected/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c-kube-api-access-8rrq5\") on node \"crc\" DevicePath \"\"" Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.313977 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fcd9d5db5-mx8h4"] Jan 29 08:07:46 crc kubenswrapper[4861]: I0129 08:07:46.329396 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fcd9d5db5-mx8h4"] Jan 29 08:07:47 crc kubenswrapper[4861]: I0129 08:07:47.138421 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" path="/var/lib/kubelet/pods/f058f08c-37a6-4ffe-aa06-6f1fa922aa1c/volumes" Jan 29 08:07:50 crc kubenswrapper[4861]: I0129 08:07:50.751243 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7fcd9d5db5-mx8h4" podUID="f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.40:5353: i/o timeout" Jan 29 08:08:05 crc kubenswrapper[4861]: I0129 08:08:05.469978 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:08:07 crc kubenswrapper[4861]: I0129 08:08:07.919871 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7dc5d457f5-tv7x9" Jan 29 08:08:07 crc kubenswrapper[4861]: I0129 08:08:07.982003 4861 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-869bc88768-7zjks"] Jan 29 08:08:07 crc kubenswrapper[4861]: I0129 08:08:07.982606 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-869bc88768-7zjks" podUID="a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" containerName="neutron-api" containerID="cri-o://27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233" gracePeriod=30 Jan 29 08:08:07 crc kubenswrapper[4861]: I0129 08:08:07.982735 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-869bc88768-7zjks" podUID="a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" containerName="neutron-httpd" containerID="cri-o://5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3" gracePeriod=30 Jan 29 08:08:09 crc kubenswrapper[4861]: I0129 08:08:09.184782 4861 generic.go:334] "Generic (PLEG): container finished" podID="a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" containerID="5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3" exitCode=0 Jan 29 08:08:09 crc kubenswrapper[4861]: I0129 08:08:09.184923 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-869bc88768-7zjks" event={"ID":"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292","Type":"ContainerDied","Data":"5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3"} Jan 29 08:08:11 crc kubenswrapper[4861]: I0129 08:08:11.956544 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.138942 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-config\") pod \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.139004 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lffvv\" (UniqueName: \"kubernetes.io/projected/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-kube-api-access-lffvv\") pod \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.139050 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-httpd-config\") pod \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.139367 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-ovndb-tls-certs\") pod \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.139476 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-combined-ca-bundle\") pod \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\" (UID: \"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292\") " Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.145562 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-httpd-config" (OuterVolumeSpecName: 
"httpd-config") pod "a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" (UID: "a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.146957 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-kube-api-access-lffvv" (OuterVolumeSpecName: "kube-api-access-lffvv") pod "a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" (UID: "a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292"). InnerVolumeSpecName "kube-api-access-lffvv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.218310 4861 generic.go:334] "Generic (PLEG): container finished" podID="a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" containerID="27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233" exitCode=0 Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.218368 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-869bc88768-7zjks" event={"ID":"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292","Type":"ContainerDied","Data":"27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233"} Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.218405 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-869bc88768-7zjks" event={"ID":"a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292","Type":"ContainerDied","Data":"b864327c9792a8f9ca09d21df48d454e093bc40067e6a94691abe0d5598f85a9"} Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.218432 4861 scope.go:117] "RemoveContainer" containerID="5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.218492 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-869bc88768-7zjks" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.218795 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-config" (OuterVolumeSpecName: "config") pod "a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" (UID: "a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.224267 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" (UID: "a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.237565 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" (UID: "a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.247576 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.247611 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.247622 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lffvv\" (UniqueName: \"kubernetes.io/projected/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-kube-api-access-lffvv\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.247631 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.247642 4861 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.264828 4861 scope.go:117] "RemoveContainer" containerID="27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.286775 4861 scope.go:117] "RemoveContainer" containerID="5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3" Jan 29 08:08:12 crc kubenswrapper[4861]: E0129 08:08:12.287286 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3\": container with ID starting with 5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3 not found: ID does not exist" containerID="5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.287368 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3"} err="failed to get container status \"5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3\": rpc error: code = NotFound desc = could not find container \"5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3\": container with ID starting with 5e2ed0a0d0ca0421df948b5a30f5b573ffb3d6592e6333042ce89b5bb547d7c3 not found: ID does not exist" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.287404 4861 scope.go:117] "RemoveContainer" containerID="27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233" Jan 29 08:08:12 crc kubenswrapper[4861]: E0129 08:08:12.287832 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233\": container with ID starting with 27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233 not found: ID does not exist" containerID="27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.287864 4861 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233"} err="failed to get container status \"27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233\": rpc error: code = NotFound desc = could not find container \"27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233\": container with ID starting with 27e3a874f5a748345e6c7937ade9cb4dc53a19f817a027476cf4fb26d86f2233 not found: ID does not exist" Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.571523 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-869bc88768-7zjks"] Jan 29 08:08:12 crc kubenswrapper[4861]: I0129 08:08:12.584659 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-869bc88768-7zjks"] Jan 29 08:08:13 crc kubenswrapper[4861]: I0129 08:08:13.134148 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" path="/var/lib/kubelet/pods/a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292/volumes" Jan 29 08:08:25 crc kubenswrapper[4861]: I0129 08:08:25.978261 4861 scope.go:117] "RemoveContainer" containerID="7310c10ed96f4c8bababd44525f7781959353d494472d076dcc2bebae1c735d2" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.823421 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-9vcj8"] Jan 29 08:08:31 crc kubenswrapper[4861]: E0129 08:08:31.825044 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" containerName="neutron-httpd" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.825137 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" containerName="neutron-httpd" Jan 29 08:08:31 crc kubenswrapper[4861]: E0129 08:08:31.825218 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" containerName="neutron-api" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.825279 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" containerName="neutron-api" Jan 29 08:08:31 crc kubenswrapper[4861]: E0129 08:08:31.825339 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" containerName="dnsmasq-dns" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.825394 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" containerName="dnsmasq-dns" Jan 29 08:08:31 crc kubenswrapper[4861]: E0129 08:08:31.825474 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" containerName="init" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.825536 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" containerName="init" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.825721 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" containerName="neutron-httpd" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.825792 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9eaea8e-c2f3-491b-a0ed-a17dbfe2b292" containerName="neutron-api" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.825852 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f058f08c-37a6-4ffe-aa06-6f1fa922aa1c" containerName="dnsmasq-dns" Jan 
29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.826433 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.832410 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.832506 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.832541 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-8jhr5" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.832617 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.832946 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.863167 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-9vcj8"] Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.914157 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-swiftconf\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.914244 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnmkg\" (UniqueName: \"kubernetes.io/projected/84b0d825-b891-429f-b7fe-6fc86904133c-kube-api-access-wnmkg\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.914306 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/84b0d825-b891-429f-b7fe-6fc86904133c-etc-swift\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.914324 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-ring-data-devices\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.914351 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-scripts\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.914383 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-dispersionconf\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " 
pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.914407 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-combined-ca-bundle\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.931302 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75fd975745-6qzgv"] Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.933742 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:31 crc kubenswrapper[4861]: I0129 08:08:31.948048 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75fd975745-6qzgv"] Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.015962 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-dispersionconf\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.016025 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-combined-ca-bundle\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.016103 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-dns-svc\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.016150 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-config\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.016177 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8fc5\" (UniqueName: \"kubernetes.io/projected/ce9850c6-f728-4e4a-8f65-79edde1e5889-kube-api-access-s8fc5\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.016207 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-swiftconf\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.016231 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnmkg\" (UniqueName: 
\"kubernetes.io/projected/84b0d825-b891-429f-b7fe-6fc86904133c-kube-api-access-wnmkg\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.016280 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-sb\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.016321 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/84b0d825-b891-429f-b7fe-6fc86904133c-etc-swift\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.016343 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-ring-data-devices\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.016367 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-nb\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.016398 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-scripts\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.017309 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-scripts\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.018358 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/84b0d825-b891-429f-b7fe-6fc86904133c-etc-swift\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.018726 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-ring-data-devices\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.023516 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-dispersionconf\") pod 
\"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.023771 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-swiftconf\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.023892 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-combined-ca-bundle\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.042569 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnmkg\" (UniqueName: \"kubernetes.io/projected/84b0d825-b891-429f-b7fe-6fc86904133c-kube-api-access-wnmkg\") pod \"swift-ring-rebalance-9vcj8\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.118985 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-sb\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.119154 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-sb\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.119720 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-nb\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.119891 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-dns-svc\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.120017 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-config\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.120179 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8fc5\" (UniqueName: \"kubernetes.io/projected/ce9850c6-f728-4e4a-8f65-79edde1e5889-kube-api-access-s8fc5\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " 
pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.120564 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-nb\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.121193 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-dns-svc\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.121336 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-config\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.136505 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8fc5\" (UniqueName: \"kubernetes.io/projected/ce9850c6-f728-4e4a-8f65-79edde1e5889-kube-api-access-s8fc5\") pod \"dnsmasq-dns-75fd975745-6qzgv\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.145829 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.256646 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:32 crc kubenswrapper[4861]: W0129 08:08:32.596552 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod84b0d825_b891_429f_b7fe_6fc86904133c.slice/crio-002a6d495b2147ee750d8b76fbc514bb58fe96c26d6ad11514fc769f936f01f1 WatchSource:0}: Error finding container 002a6d495b2147ee750d8b76fbc514bb58fe96c26d6ad11514fc769f936f01f1: Status 404 returned error can't find the container with id 002a6d495b2147ee750d8b76fbc514bb58fe96c26d6ad11514fc769f936f01f1 Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.608682 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-9vcj8"] Jan 29 08:08:32 crc kubenswrapper[4861]: I0129 08:08:32.724986 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75fd975745-6qzgv"] Jan 29 08:08:32 crc kubenswrapper[4861]: W0129 08:08:32.730844 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce9850c6_f728_4e4a_8f65_79edde1e5889.slice/crio-14f8dde3d090c884f340c06473b0e27ec2fe6aab36160dea7ed8ca9cdea41ca1 WatchSource:0}: Error finding container 14f8dde3d090c884f340c06473b0e27ec2fe6aab36160dea7ed8ca9cdea41ca1: Status 404 returned error can't find the container with id 14f8dde3d090c884f340c06473b0e27ec2fe6aab36160dea7ed8ca9cdea41ca1 Jan 29 08:08:33 crc kubenswrapper[4861]: I0129 08:08:33.475694 4861 generic.go:334] "Generic (PLEG): container finished" podID="ce9850c6-f728-4e4a-8f65-79edde1e5889" containerID="375ff8ec1f3a9043eab722981545c37818872d305af322159deeb58d8fb612be" exitCode=0 Jan 29 08:08:33 crc kubenswrapper[4861]: I0129 08:08:33.475763 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" event={"ID":"ce9850c6-f728-4e4a-8f65-79edde1e5889","Type":"ContainerDied","Data":"375ff8ec1f3a9043eab722981545c37818872d305af322159deeb58d8fb612be"} Jan 29 08:08:33 crc kubenswrapper[4861]: I0129 08:08:33.475983 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" event={"ID":"ce9850c6-f728-4e4a-8f65-79edde1e5889","Type":"ContainerStarted","Data":"14f8dde3d090c884f340c06473b0e27ec2fe6aab36160dea7ed8ca9cdea41ca1"} Jan 29 08:08:33 crc kubenswrapper[4861]: I0129 08:08:33.477839 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9vcj8" event={"ID":"84b0d825-b891-429f-b7fe-6fc86904133c","Type":"ContainerStarted","Data":"109b9e2199d97993c0ddf16a2cfed14732208638d124913ca614b67c33046fb1"} Jan 29 08:08:33 crc kubenswrapper[4861]: I0129 08:08:33.477862 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9vcj8" event={"ID":"84b0d825-b891-429f-b7fe-6fc86904133c","Type":"ContainerStarted","Data":"002a6d495b2147ee750d8b76fbc514bb58fe96c26d6ad11514fc769f936f01f1"} Jan 29 08:08:33 crc kubenswrapper[4861]: I0129 08:08:33.538841 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-9vcj8" podStartSLOduration=2.538824414 podStartE2EDuration="2.538824414s" podCreationTimestamp="2026-01-29 08:08:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:08:33.535223189 +0000 UTC m=+5605.206717746" watchObservedRunningTime="2026-01-29 08:08:33.538824414 +0000 UTC m=+5605.210318971" 
Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.487442 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" event={"ID":"ce9850c6-f728-4e4a-8f65-79edde1e5889","Type":"ContainerStarted","Data":"d39a03fa46ac2e6d51b0b1a3e8b8e7b806aeeec60b7bede796ec9978104a0360"} Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.487780 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.513413 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" podStartSLOduration=3.51338889 podStartE2EDuration="3.51338889s" podCreationTimestamp="2026-01-29 08:08:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:08:34.505501603 +0000 UTC m=+5606.176996180" watchObservedRunningTime="2026-01-29 08:08:34.51338889 +0000 UTC m=+5606.184883457" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.590935 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-599b9c9d9b-lb8b5"] Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.592470 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.594470 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.620583 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-599b9c9d9b-lb8b5"] Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.667177 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4xkt\" (UniqueName: \"kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-kube-api-access-z4xkt\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.667221 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-etc-swift\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.667274 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-run-httpd\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.667365 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-config-data\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.667386 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-combined-ca-bundle\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.667408 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-log-httpd\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.769490 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-run-httpd\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.769608 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-config-data\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.769633 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-combined-ca-bundle\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.769663 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-log-httpd\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.769700 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4xkt\" (UniqueName: \"kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-kube-api-access-z4xkt\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.769730 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-etc-swift\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.770044 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-run-httpd\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.770583 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-log-httpd\") pod 
\"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.775943 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-combined-ca-bundle\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.776216 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-config-data\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.777127 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-etc-swift\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.788219 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4xkt\" (UniqueName: \"kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-kube-api-access-z4xkt\") pod \"swift-proxy-599b9c9d9b-lb8b5\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:34 crc kubenswrapper[4861]: I0129 08:08:34.923881 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:35 crc kubenswrapper[4861]: I0129 08:08:35.591832 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-599b9c9d9b-lb8b5"] Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.504765 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" event={"ID":"0c1952d9-308d-4d8e-867e-5c5f2812296a","Type":"ContainerStarted","Data":"b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0"} Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.505052 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" event={"ID":"0c1952d9-308d-4d8e-867e-5c5f2812296a","Type":"ContainerStarted","Data":"2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c"} Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.505066 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" event={"ID":"0c1952d9-308d-4d8e-867e-5c5f2812296a","Type":"ContainerStarted","Data":"b92909c7c56016efa1520c35a16cff2fc700bc7d6eb09b8489619489eac1285e"} Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.505132 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.505153 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.532358 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" podStartSLOduration=2.532339051 podStartE2EDuration="2.532339051s" 
podCreationTimestamp="2026-01-29 08:08:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:08:36.524559637 +0000 UTC m=+5608.196054214" watchObservedRunningTime="2026-01-29 08:08:36.532339051 +0000 UTC m=+5608.203833608" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.703770 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-578c88c7d6-cxzrb"] Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.705488 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.707725 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.708327 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.719036 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-578c88c7d6-cxzrb"] Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.804729 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jr95h\" (UniqueName: \"kubernetes.io/projected/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-kube-api-access-jr95h\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.804804 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-combined-ca-bundle\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.804842 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-log-httpd\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.805088 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-public-tls-certs\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.805204 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-config-data\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.805301 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-internal-tls-certs\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: 
\"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.805364 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-run-httpd\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.805418 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-etc-swift\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.907302 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-public-tls-certs\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.907358 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-config-data\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.907382 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-internal-tls-certs\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.907406 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-run-httpd\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.907428 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-etc-swift\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.907494 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jr95h\" (UniqueName: \"kubernetes.io/projected/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-kube-api-access-jr95h\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.907537 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-log-httpd\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " 
pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.907552 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-combined-ca-bundle\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.907958 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-run-httpd\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.908124 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-log-httpd\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.911868 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-internal-tls-certs\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.911933 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-combined-ca-bundle\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.912716 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-config-data\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.912794 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-public-tls-certs\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.926620 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-etc-swift\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:36 crc kubenswrapper[4861]: I0129 08:08:36.945933 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jr95h\" (UniqueName: \"kubernetes.io/projected/7a3b8613-fa03-4ee0-90f0-bc02abb0e72b-kube-api-access-jr95h\") pod \"swift-proxy-578c88c7d6-cxzrb\" (UID: \"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b\") " pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:37 crc kubenswrapper[4861]: I0129 08:08:37.028728 4861 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:37 crc kubenswrapper[4861]: I0129 08:08:37.517382 4861 generic.go:334] "Generic (PLEG): container finished" podID="84b0d825-b891-429f-b7fe-6fc86904133c" containerID="109b9e2199d97993c0ddf16a2cfed14732208638d124913ca614b67c33046fb1" exitCode=0 Jan 29 08:08:37 crc kubenswrapper[4861]: I0129 08:08:37.517479 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9vcj8" event={"ID":"84b0d825-b891-429f-b7fe-6fc86904133c","Type":"ContainerDied","Data":"109b9e2199d97993c0ddf16a2cfed14732208638d124913ca614b67c33046fb1"} Jan 29 08:08:37 crc kubenswrapper[4861]: W0129 08:08:37.785898 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a3b8613_fa03_4ee0_90f0_bc02abb0e72b.slice/crio-f023ebf8ef0d642aa99140ce506b00d7557d61777fede323d4a7c608f672459f WatchSource:0}: Error finding container f023ebf8ef0d642aa99140ce506b00d7557d61777fede323d4a7c608f672459f: Status 404 returned error can't find the container with id f023ebf8ef0d642aa99140ce506b00d7557d61777fede323d4a7c608f672459f Jan 29 08:08:37 crc kubenswrapper[4861]: I0129 08:08:37.801024 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-578c88c7d6-cxzrb"] Jan 29 08:08:38 crc kubenswrapper[4861]: I0129 08:08:38.531130 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-578c88c7d6-cxzrb" event={"ID":"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b","Type":"ContainerStarted","Data":"372a45e998776642cbc9f9d9ff61f1c040a65fc82473a73cf694a060650dec8e"} Jan 29 08:08:38 crc kubenswrapper[4861]: I0129 08:08:38.531539 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-578c88c7d6-cxzrb" event={"ID":"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b","Type":"ContainerStarted","Data":"cd5660ced2dad63e77cd876b2806d8f50c253d2339b701c13c0975d9a521f3f0"} Jan 29 08:08:38 crc kubenswrapper[4861]: I0129 08:08:38.531564 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-578c88c7d6-cxzrb" event={"ID":"7a3b8613-fa03-4ee0-90f0-bc02abb0e72b","Type":"ContainerStarted","Data":"f023ebf8ef0d642aa99140ce506b00d7557d61777fede323d4a7c608f672459f"} Jan 29 08:08:38 crc kubenswrapper[4861]: I0129 08:08:38.575329 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-578c88c7d6-cxzrb" podStartSLOduration=2.575305761 podStartE2EDuration="2.575305761s" podCreationTimestamp="2026-01-29 08:08:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:08:38.560686478 +0000 UTC m=+5610.232181085" watchObservedRunningTime="2026-01-29 08:08:38.575305761 +0000 UTC m=+5610.246800318" Jan 29 08:08:38 crc kubenswrapper[4861]: I0129 08:08:38.961116 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.055355 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnmkg\" (UniqueName: \"kubernetes.io/projected/84b0d825-b891-429f-b7fe-6fc86904133c-kube-api-access-wnmkg\") pod \"84b0d825-b891-429f-b7fe-6fc86904133c\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.055427 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-combined-ca-bundle\") pod \"84b0d825-b891-429f-b7fe-6fc86904133c\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.055650 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-dispersionconf\") pod \"84b0d825-b891-429f-b7fe-6fc86904133c\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.055691 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/84b0d825-b891-429f-b7fe-6fc86904133c-etc-swift\") pod \"84b0d825-b891-429f-b7fe-6fc86904133c\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.055745 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-scripts\") pod \"84b0d825-b891-429f-b7fe-6fc86904133c\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.055787 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-ring-data-devices\") pod \"84b0d825-b891-429f-b7fe-6fc86904133c\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.055814 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-swiftconf\") pod \"84b0d825-b891-429f-b7fe-6fc86904133c\" (UID: \"84b0d825-b891-429f-b7fe-6fc86904133c\") " Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.056569 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "84b0d825-b891-429f-b7fe-6fc86904133c" (UID: "84b0d825-b891-429f-b7fe-6fc86904133c"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.056793 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84b0d825-b891-429f-b7fe-6fc86904133c-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "84b0d825-b891-429f-b7fe-6fc86904133c" (UID: "84b0d825-b891-429f-b7fe-6fc86904133c"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.057364 4861 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/84b0d825-b891-429f-b7fe-6fc86904133c-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.057416 4861 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.074715 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84b0d825-b891-429f-b7fe-6fc86904133c-kube-api-access-wnmkg" (OuterVolumeSpecName: "kube-api-access-wnmkg") pod "84b0d825-b891-429f-b7fe-6fc86904133c" (UID: "84b0d825-b891-429f-b7fe-6fc86904133c"). InnerVolumeSpecName "kube-api-access-wnmkg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.083897 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "84b0d825-b891-429f-b7fe-6fc86904133c" (UID: "84b0d825-b891-429f-b7fe-6fc86904133c"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.086970 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "84b0d825-b891-429f-b7fe-6fc86904133c" (UID: "84b0d825-b891-429f-b7fe-6fc86904133c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.087450 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-scripts" (OuterVolumeSpecName: "scripts") pod "84b0d825-b891-429f-b7fe-6fc86904133c" (UID: "84b0d825-b891-429f-b7fe-6fc86904133c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.101094 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "84b0d825-b891-429f-b7fe-6fc86904133c" (UID: "84b0d825-b891-429f-b7fe-6fc86904133c"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.173021 4861 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.173087 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/84b0d825-b891-429f-b7fe-6fc86904133c-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.173098 4861 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.173107 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnmkg\" (UniqueName: \"kubernetes.io/projected/84b0d825-b891-429f-b7fe-6fc86904133c-kube-api-access-wnmkg\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.173118 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b0d825-b891-429f-b7fe-6fc86904133c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.542857 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9vcj8" event={"ID":"84b0d825-b891-429f-b7fe-6fc86904133c","Type":"ContainerDied","Data":"002a6d495b2147ee750d8b76fbc514bb58fe96c26d6ad11514fc769f936f01f1"} Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.542920 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="002a6d495b2147ee750d8b76fbc514bb58fe96c26d6ad11514fc769f936f01f1" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.542882 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-9vcj8" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.543036 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:39 crc kubenswrapper[4861]: I0129 08:08:39.543067 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.259428 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.367243 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-879b6795f-7qc46"] Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.367612 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-879b6795f-7qc46" podUID="d5f913e4-be24-42aa-8c80-bb119bf869c9" containerName="dnsmasq-dns" containerID="cri-o://e151c214cb39f7bfdabefd498bf45542d4eb81e943f876a6fd0b076a2a6dd94a" gracePeriod=10 Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.572283 4861 generic.go:334] "Generic (PLEG): container finished" podID="d5f913e4-be24-42aa-8c80-bb119bf869c9" containerID="e151c214cb39f7bfdabefd498bf45542d4eb81e943f876a6fd0b076a2a6dd94a" exitCode=0 Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.572476 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-879b6795f-7qc46" event={"ID":"d5f913e4-be24-42aa-8c80-bb119bf869c9","Type":"ContainerDied","Data":"e151c214cb39f7bfdabefd498bf45542d4eb81e943f876a6fd0b076a2a6dd94a"} Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.878936 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.949330 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-sb\") pod \"d5f913e4-be24-42aa-8c80-bb119bf869c9\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.949412 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-dns-svc\") pod \"d5f913e4-be24-42aa-8c80-bb119bf869c9\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.949454 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-nb\") pod \"d5f913e4-be24-42aa-8c80-bb119bf869c9\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.949512 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-config\") pod \"d5f913e4-be24-42aa-8c80-bb119bf869c9\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.949664 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cn65l\" (UniqueName: \"kubernetes.io/projected/d5f913e4-be24-42aa-8c80-bb119bf869c9-kube-api-access-cn65l\") pod \"d5f913e4-be24-42aa-8c80-bb119bf869c9\" (UID: \"d5f913e4-be24-42aa-8c80-bb119bf869c9\") " Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.954222 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5f913e4-be24-42aa-8c80-bb119bf869c9-kube-api-access-cn65l" (OuterVolumeSpecName: "kube-api-access-cn65l") pod "d5f913e4-be24-42aa-8c80-bb119bf869c9" (UID: "d5f913e4-be24-42aa-8c80-bb119bf869c9"). InnerVolumeSpecName "kube-api-access-cn65l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.987113 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d5f913e4-be24-42aa-8c80-bb119bf869c9" (UID: "d5f913e4-be24-42aa-8c80-bb119bf869c9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.995034 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d5f913e4-be24-42aa-8c80-bb119bf869c9" (UID: "d5f913e4-be24-42aa-8c80-bb119bf869c9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:08:42 crc kubenswrapper[4861]: I0129 08:08:42.999858 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d5f913e4-be24-42aa-8c80-bb119bf869c9" (UID: "d5f913e4-be24-42aa-8c80-bb119bf869c9"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.000823 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-config" (OuterVolumeSpecName: "config") pod "d5f913e4-be24-42aa-8c80-bb119bf869c9" (UID: "d5f913e4-be24-42aa-8c80-bb119bf869c9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.051420 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cn65l\" (UniqueName: \"kubernetes.io/projected/d5f913e4-be24-42aa-8c80-bb119bf869c9-kube-api-access-cn65l\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.051462 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.051472 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.051480 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.051513 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5f913e4-be24-42aa-8c80-bb119bf869c9-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.585038 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-879b6795f-7qc46" event={"ID":"d5f913e4-be24-42aa-8c80-bb119bf869c9","Type":"ContainerDied","Data":"bb7f29b6f82757e41e3f88330725403ec2103d71fdd70da3078da1c1cc1d24ad"} Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.585145 4861 scope.go:117] "RemoveContainer" containerID="e151c214cb39f7bfdabefd498bf45542d4eb81e943f876a6fd0b076a2a6dd94a" Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.585435 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-879b6795f-7qc46" Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.622559 4861 scope.go:117] "RemoveContainer" containerID="3d032f45a0c54505c4a26f7cb5fa2403bb0037f4eb70b2843b51e351ed26e548" Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.634117 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-879b6795f-7qc46"] Jan 29 08:08:43 crc kubenswrapper[4861]: I0129 08:08:43.645352 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-879b6795f-7qc46"] Jan 29 08:08:44 crc kubenswrapper[4861]: I0129 08:08:44.926732 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:44 crc kubenswrapper[4861]: I0129 08:08:44.927565 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:45 crc kubenswrapper[4861]: I0129 08:08:45.130851 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5f913e4-be24-42aa-8c80-bb119bf869c9" path="/var/lib/kubelet/pods/d5f913e4-be24-42aa-8c80-bb119bf869c9/volumes" Jan 29 08:08:47 crc kubenswrapper[4861]: I0129 08:08:47.034860 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:47 crc kubenswrapper[4861]: I0129 08:08:47.038151 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-578c88c7d6-cxzrb" Jan 29 08:08:47 crc kubenswrapper[4861]: I0129 08:08:47.162219 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-599b9c9d9b-lb8b5"] Jan 29 08:08:47 crc kubenswrapper[4861]: I0129 08:08:47.162531 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" podUID="0c1952d9-308d-4d8e-867e-5c5f2812296a" containerName="proxy-httpd" containerID="cri-o://2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c" gracePeriod=30 Jan 29 08:08:47 crc kubenswrapper[4861]: I0129 08:08:47.162719 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" podUID="0c1952d9-308d-4d8e-867e-5c5f2812296a" containerName="proxy-server" containerID="cri-o://b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0" gracePeriod=30 Jan 29 08:08:47 crc kubenswrapper[4861]: I0129 08:08:47.623737 4861 generic.go:334] "Generic (PLEG): container finished" podID="0c1952d9-308d-4d8e-867e-5c5f2812296a" containerID="2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c" exitCode=0 Jan 29 08:08:47 crc kubenswrapper[4861]: I0129 08:08:47.623851 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" event={"ID":"0c1952d9-308d-4d8e-867e-5c5f2812296a","Type":"ContainerDied","Data":"2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c"} Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.317203 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.448810 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4xkt\" (UniqueName: \"kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-kube-api-access-z4xkt\") pod \"0c1952d9-308d-4d8e-867e-5c5f2812296a\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.448868 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-combined-ca-bundle\") pod \"0c1952d9-308d-4d8e-867e-5c5f2812296a\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.448906 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-run-httpd\") pod \"0c1952d9-308d-4d8e-867e-5c5f2812296a\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.448970 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-log-httpd\") pod \"0c1952d9-308d-4d8e-867e-5c5f2812296a\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.448991 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-config-data\") pod \"0c1952d9-308d-4d8e-867e-5c5f2812296a\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.449020 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-etc-swift\") pod \"0c1952d9-308d-4d8e-867e-5c5f2812296a\" (UID: \"0c1952d9-308d-4d8e-867e-5c5f2812296a\") " Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.452500 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0c1952d9-308d-4d8e-867e-5c5f2812296a" (UID: "0c1952d9-308d-4d8e-867e-5c5f2812296a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.452573 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0c1952d9-308d-4d8e-867e-5c5f2812296a" (UID: "0c1952d9-308d-4d8e-867e-5c5f2812296a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.455898 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "0c1952d9-308d-4d8e-867e-5c5f2812296a" (UID: "0c1952d9-308d-4d8e-867e-5c5f2812296a"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.456809 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-kube-api-access-z4xkt" (OuterVolumeSpecName: "kube-api-access-z4xkt") pod "0c1952d9-308d-4d8e-867e-5c5f2812296a" (UID: "0c1952d9-308d-4d8e-867e-5c5f2812296a"). InnerVolumeSpecName "kube-api-access-z4xkt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.507146 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c1952d9-308d-4d8e-867e-5c5f2812296a" (UID: "0c1952d9-308d-4d8e-867e-5c5f2812296a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.509180 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-config-data" (OuterVolumeSpecName: "config-data") pod "0c1952d9-308d-4d8e-867e-5c5f2812296a" (UID: "0c1952d9-308d-4d8e-867e-5c5f2812296a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.551932 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4xkt\" (UniqueName: \"kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-kube-api-access-z4xkt\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.551996 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.552007 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.552016 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0c1952d9-308d-4d8e-867e-5c5f2812296a-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.552029 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c1952d9-308d-4d8e-867e-5c5f2812296a-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.552040 4861 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c1952d9-308d-4d8e-867e-5c5f2812296a-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.632741 4861 generic.go:334] "Generic (PLEG): container finished" podID="0c1952d9-308d-4d8e-867e-5c5f2812296a" containerID="b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0" exitCode=0 Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.632778 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" event={"ID":"0c1952d9-308d-4d8e-867e-5c5f2812296a","Type":"ContainerDied","Data":"b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0"} Jan 
29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.632802 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" event={"ID":"0c1952d9-308d-4d8e-867e-5c5f2812296a","Type":"ContainerDied","Data":"b92909c7c56016efa1520c35a16cff2fc700bc7d6eb09b8489619489eac1285e"} Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.632830 4861 scope.go:117] "RemoveContainer" containerID="b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.632966 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-599b9c9d9b-lb8b5" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.667936 4861 scope.go:117] "RemoveContainer" containerID="2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.673291 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-599b9c9d9b-lb8b5"] Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.680647 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-599b9c9d9b-lb8b5"] Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.692733 4861 scope.go:117] "RemoveContainer" containerID="b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0" Jan 29 08:08:48 crc kubenswrapper[4861]: E0129 08:08:48.693341 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0\": container with ID starting with b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0 not found: ID does not exist" containerID="b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.693371 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0"} err="failed to get container status \"b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0\": rpc error: code = NotFound desc = could not find container \"b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0\": container with ID starting with b4534df3d738f2b999be54dd585e6b8100e38750fb78570e096bca2f37c2a1c0 not found: ID does not exist" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.693396 4861 scope.go:117] "RemoveContainer" containerID="2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c" Jan 29 08:08:48 crc kubenswrapper[4861]: E0129 08:08:48.693712 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c\": container with ID starting with 2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c not found: ID does not exist" containerID="2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c" Jan 29 08:08:48 crc kubenswrapper[4861]: I0129 08:08:48.693731 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c"} err="failed to get container status \"2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c\": rpc error: code = NotFound desc = could not find container \"2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c\": container with ID 
starting with 2680482aae7e744391d5926980a186863b4a7a50b702a662da276f685dbd6a3c not found: ID does not exist" Jan 29 08:08:49 crc kubenswrapper[4861]: I0129 08:08:49.127443 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c1952d9-308d-4d8e-867e-5c5f2812296a" path="/var/lib/kubelet/pods/0c1952d9-308d-4d8e-867e-5c5f2812296a/volumes" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.961518 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-n79dl"] Jan 29 08:08:52 crc kubenswrapper[4861]: E0129 08:08:52.962265 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5f913e4-be24-42aa-8c80-bb119bf869c9" containerName="dnsmasq-dns" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.962281 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5f913e4-be24-42aa-8c80-bb119bf869c9" containerName="dnsmasq-dns" Jan 29 08:08:52 crc kubenswrapper[4861]: E0129 08:08:52.962298 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84b0d825-b891-429f-b7fe-6fc86904133c" containerName="swift-ring-rebalance" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.962316 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="84b0d825-b891-429f-b7fe-6fc86904133c" containerName="swift-ring-rebalance" Jan 29 08:08:52 crc kubenswrapper[4861]: E0129 08:08:52.962332 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c1952d9-308d-4d8e-867e-5c5f2812296a" containerName="proxy-server" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.962341 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c1952d9-308d-4d8e-867e-5c5f2812296a" containerName="proxy-server" Jan 29 08:08:52 crc kubenswrapper[4861]: E0129 08:08:52.962353 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c1952d9-308d-4d8e-867e-5c5f2812296a" containerName="proxy-httpd" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.962361 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c1952d9-308d-4d8e-867e-5c5f2812296a" containerName="proxy-httpd" Jan 29 08:08:52 crc kubenswrapper[4861]: E0129 08:08:52.962375 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5f913e4-be24-42aa-8c80-bb119bf869c9" containerName="init" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.962383 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5f913e4-be24-42aa-8c80-bb119bf869c9" containerName="init" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.962595 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="84b0d825-b891-429f-b7fe-6fc86904133c" containerName="swift-ring-rebalance" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.962613 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c1952d9-308d-4d8e-867e-5c5f2812296a" containerName="proxy-httpd" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.962629 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c1952d9-308d-4d8e-867e-5c5f2812296a" containerName="proxy-server" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.962654 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5f913e4-be24-42aa-8c80-bb119bf869c9" containerName="dnsmasq-dns" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.963354 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-n79dl" Jan 29 08:08:52 crc kubenswrapper[4861]: I0129 08:08:52.974935 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-n79dl"] Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.073394 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-8618-account-create-update-v8qq4"] Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.074792 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8618-account-create-update-v8qq4" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.077170 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.079758 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-8618-account-create-update-v8qq4"] Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.130304 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4821b164-e444-4c00-a965-0bb0d722b944-operator-scripts\") pod \"cinder-db-create-n79dl\" (UID: \"4821b164-e444-4c00-a965-0bb0d722b944\") " pod="openstack/cinder-db-create-n79dl" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.130743 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt8xs\" (UniqueName: \"kubernetes.io/projected/4821b164-e444-4c00-a965-0bb0d722b944-kube-api-access-mt8xs\") pod \"cinder-db-create-n79dl\" (UID: \"4821b164-e444-4c00-a965-0bb0d722b944\") " pod="openstack/cinder-db-create-n79dl" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.232291 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt8xs\" (UniqueName: \"kubernetes.io/projected/4821b164-e444-4c00-a965-0bb0d722b944-kube-api-access-mt8xs\") pod \"cinder-db-create-n79dl\" (UID: \"4821b164-e444-4c00-a965-0bb0d722b944\") " pod="openstack/cinder-db-create-n79dl" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.233034 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4821b164-e444-4c00-a965-0bb0d722b944-operator-scripts\") pod \"cinder-db-create-n79dl\" (UID: \"4821b164-e444-4c00-a965-0bb0d722b944\") " pod="openstack/cinder-db-create-n79dl" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.233221 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ks9f2\" (UniqueName: \"kubernetes.io/projected/485df95d-da0f-498c-9d95-38dfe0c8be8c-kube-api-access-ks9f2\") pod \"cinder-8618-account-create-update-v8qq4\" (UID: \"485df95d-da0f-498c-9d95-38dfe0c8be8c\") " pod="openstack/cinder-8618-account-create-update-v8qq4" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.233543 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/485df95d-da0f-498c-9d95-38dfe0c8be8c-operator-scripts\") pod \"cinder-8618-account-create-update-v8qq4\" (UID: \"485df95d-da0f-498c-9d95-38dfe0c8be8c\") " pod="openstack/cinder-8618-account-create-update-v8qq4" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.236310 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/4821b164-e444-4c00-a965-0bb0d722b944-operator-scripts\") pod \"cinder-db-create-n79dl\" (UID: \"4821b164-e444-4c00-a965-0bb0d722b944\") " pod="openstack/cinder-db-create-n79dl" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.266365 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mt8xs\" (UniqueName: \"kubernetes.io/projected/4821b164-e444-4c00-a965-0bb0d722b944-kube-api-access-mt8xs\") pod \"cinder-db-create-n79dl\" (UID: \"4821b164-e444-4c00-a965-0bb0d722b944\") " pod="openstack/cinder-db-create-n79dl" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.332266 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-n79dl" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.335815 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/485df95d-da0f-498c-9d95-38dfe0c8be8c-operator-scripts\") pod \"cinder-8618-account-create-update-v8qq4\" (UID: \"485df95d-da0f-498c-9d95-38dfe0c8be8c\") " pod="openstack/cinder-8618-account-create-update-v8qq4" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.336107 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ks9f2\" (UniqueName: \"kubernetes.io/projected/485df95d-da0f-498c-9d95-38dfe0c8be8c-kube-api-access-ks9f2\") pod \"cinder-8618-account-create-update-v8qq4\" (UID: \"485df95d-da0f-498c-9d95-38dfe0c8be8c\") " pod="openstack/cinder-8618-account-create-update-v8qq4" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.336980 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/485df95d-da0f-498c-9d95-38dfe0c8be8c-operator-scripts\") pod \"cinder-8618-account-create-update-v8qq4\" (UID: \"485df95d-da0f-498c-9d95-38dfe0c8be8c\") " pod="openstack/cinder-8618-account-create-update-v8qq4" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.361032 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ks9f2\" (UniqueName: \"kubernetes.io/projected/485df95d-da0f-498c-9d95-38dfe0c8be8c-kube-api-access-ks9f2\") pod \"cinder-8618-account-create-update-v8qq4\" (UID: \"485df95d-da0f-498c-9d95-38dfe0c8be8c\") " pod="openstack/cinder-8618-account-create-update-v8qq4" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.387944 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-8618-account-create-update-v8qq4" Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.702198 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-8618-account-create-update-v8qq4"] Jan 29 08:08:53 crc kubenswrapper[4861]: I0129 08:08:53.822921 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-n79dl"] Jan 29 08:08:53 crc kubenswrapper[4861]: W0129 08:08:53.826992 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4821b164_e444_4c00_a965_0bb0d722b944.slice/crio-b667d4b91911163ed896fdbd638f8d337fe524b756ec675a98dd382a4cdf86ac WatchSource:0}: Error finding container b667d4b91911163ed896fdbd638f8d337fe524b756ec675a98dd382a4cdf86ac: Status 404 returned error can't find the container with id b667d4b91911163ed896fdbd638f8d337fe524b756ec675a98dd382a4cdf86ac Jan 29 08:08:54 crc kubenswrapper[4861]: I0129 08:08:54.705869 4861 generic.go:334] "Generic (PLEG): container finished" podID="485df95d-da0f-498c-9d95-38dfe0c8be8c" containerID="62e32f40c16d8219cbef5c6fd50a144ec6d45dc98c52d2903d81028aab099d72" exitCode=0 Jan 29 08:08:54 crc kubenswrapper[4861]: I0129 08:08:54.705956 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8618-account-create-update-v8qq4" event={"ID":"485df95d-da0f-498c-9d95-38dfe0c8be8c","Type":"ContainerDied","Data":"62e32f40c16d8219cbef5c6fd50a144ec6d45dc98c52d2903d81028aab099d72"} Jan 29 08:08:54 crc kubenswrapper[4861]: I0129 08:08:54.706216 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8618-account-create-update-v8qq4" event={"ID":"485df95d-da0f-498c-9d95-38dfe0c8be8c","Type":"ContainerStarted","Data":"1e3031b2d5d62d87dc83cc3ab4e405f1c833a2952463d5b87ee71783b3963056"} Jan 29 08:08:54 crc kubenswrapper[4861]: I0129 08:08:54.709442 4861 generic.go:334] "Generic (PLEG): container finished" podID="4821b164-e444-4c00-a965-0bb0d722b944" containerID="72665c0fcf836acd6d33be965132f4888628d63adf4be4a5dbe3553f64331d40" exitCode=0 Jan 29 08:08:54 crc kubenswrapper[4861]: I0129 08:08:54.709489 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-n79dl" event={"ID":"4821b164-e444-4c00-a965-0bb0d722b944","Type":"ContainerDied","Data":"72665c0fcf836acd6d33be965132f4888628d63adf4be4a5dbe3553f64331d40"} Jan 29 08:08:54 crc kubenswrapper[4861]: I0129 08:08:54.709517 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-n79dl" event={"ID":"4821b164-e444-4c00-a965-0bb0d722b944","Type":"ContainerStarted","Data":"b667d4b91911163ed896fdbd638f8d337fe524b756ec675a98dd382a4cdf86ac"} Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.204288 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8618-account-create-update-v8qq4" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.210055 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-n79dl" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.295403 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mt8xs\" (UniqueName: \"kubernetes.io/projected/4821b164-e444-4c00-a965-0bb0d722b944-kube-api-access-mt8xs\") pod \"4821b164-e444-4c00-a965-0bb0d722b944\" (UID: \"4821b164-e444-4c00-a965-0bb0d722b944\") " Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.295484 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ks9f2\" (UniqueName: \"kubernetes.io/projected/485df95d-da0f-498c-9d95-38dfe0c8be8c-kube-api-access-ks9f2\") pod \"485df95d-da0f-498c-9d95-38dfe0c8be8c\" (UID: \"485df95d-da0f-498c-9d95-38dfe0c8be8c\") " Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.295598 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4821b164-e444-4c00-a965-0bb0d722b944-operator-scripts\") pod \"4821b164-e444-4c00-a965-0bb0d722b944\" (UID: \"4821b164-e444-4c00-a965-0bb0d722b944\") " Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.295680 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/485df95d-da0f-498c-9d95-38dfe0c8be8c-operator-scripts\") pod \"485df95d-da0f-498c-9d95-38dfe0c8be8c\" (UID: \"485df95d-da0f-498c-9d95-38dfe0c8be8c\") " Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.296089 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4821b164-e444-4c00-a965-0bb0d722b944-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4821b164-e444-4c00-a965-0bb0d722b944" (UID: "4821b164-e444-4c00-a965-0bb0d722b944"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.296303 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/485df95d-da0f-498c-9d95-38dfe0c8be8c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "485df95d-da0f-498c-9d95-38dfe0c8be8c" (UID: "485df95d-da0f-498c-9d95-38dfe0c8be8c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.300853 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/485df95d-da0f-498c-9d95-38dfe0c8be8c-kube-api-access-ks9f2" (OuterVolumeSpecName: "kube-api-access-ks9f2") pod "485df95d-da0f-498c-9d95-38dfe0c8be8c" (UID: "485df95d-da0f-498c-9d95-38dfe0c8be8c"). InnerVolumeSpecName "kube-api-access-ks9f2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.301271 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4821b164-e444-4c00-a965-0bb0d722b944-kube-api-access-mt8xs" (OuterVolumeSpecName: "kube-api-access-mt8xs") pod "4821b164-e444-4c00-a965-0bb0d722b944" (UID: "4821b164-e444-4c00-a965-0bb0d722b944"). InnerVolumeSpecName "kube-api-access-mt8xs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.397894 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/485df95d-da0f-498c-9d95-38dfe0c8be8c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.397928 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mt8xs\" (UniqueName: \"kubernetes.io/projected/4821b164-e444-4c00-a965-0bb0d722b944-kube-api-access-mt8xs\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.397941 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ks9f2\" (UniqueName: \"kubernetes.io/projected/485df95d-da0f-498c-9d95-38dfe0c8be8c-kube-api-access-ks9f2\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.397951 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4821b164-e444-4c00-a965-0bb0d722b944-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.725706 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-n79dl" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.725696 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-n79dl" event={"ID":"4821b164-e444-4c00-a965-0bb0d722b944","Type":"ContainerDied","Data":"b667d4b91911163ed896fdbd638f8d337fe524b756ec675a98dd382a4cdf86ac"} Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.725936 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b667d4b91911163ed896fdbd638f8d337fe524b756ec675a98dd382a4cdf86ac" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.727320 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8618-account-create-update-v8qq4" event={"ID":"485df95d-da0f-498c-9d95-38dfe0c8be8c","Type":"ContainerDied","Data":"1e3031b2d5d62d87dc83cc3ab4e405f1c833a2952463d5b87ee71783b3963056"} Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.727404 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e3031b2d5d62d87dc83cc3ab4e405f1c833a2952463d5b87ee71783b3963056" Jan 29 08:08:56 crc kubenswrapper[4861]: I0129 08:08:56.727385 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-8618-account-create-update-v8qq4" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.357288 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-4zgn5"] Jan 29 08:08:58 crc kubenswrapper[4861]: E0129 08:08:58.358034 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="485df95d-da0f-498c-9d95-38dfe0c8be8c" containerName="mariadb-account-create-update" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.358051 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="485df95d-da0f-498c-9d95-38dfe0c8be8c" containerName="mariadb-account-create-update" Jan 29 08:08:58 crc kubenswrapper[4861]: E0129 08:08:58.358100 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4821b164-e444-4c00-a965-0bb0d722b944" containerName="mariadb-database-create" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.358111 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4821b164-e444-4c00-a965-0bb0d722b944" containerName="mariadb-database-create" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.358312 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="485df95d-da0f-498c-9d95-38dfe0c8be8c" containerName="mariadb-account-create-update" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.358338 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4821b164-e444-4c00-a965-0bb0d722b944" containerName="mariadb-database-create" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.359055 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.361009 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.361618 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.361934 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-7q89j" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.370376 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-4zgn5"] Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.432911 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f79a7f94-15e1-49ff-a383-0ea07b714dd2-etc-machine-id\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.433270 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5s48\" (UniqueName: \"kubernetes.io/projected/f79a7f94-15e1-49ff-a383-0ea07b714dd2-kube-api-access-j5s48\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.433300 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-db-sync-config-data\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc 
kubenswrapper[4861]: I0129 08:08:58.433353 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-scripts\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.433505 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-combined-ca-bundle\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.433559 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-config-data\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.534687 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f79a7f94-15e1-49ff-a383-0ea07b714dd2-etc-machine-id\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.534734 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5s48\" (UniqueName: \"kubernetes.io/projected/f79a7f94-15e1-49ff-a383-0ea07b714dd2-kube-api-access-j5s48\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.534757 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-db-sync-config-data\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.534761 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f79a7f94-15e1-49ff-a383-0ea07b714dd2-etc-machine-id\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.534832 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-scripts\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.534937 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-combined-ca-bundle\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.534980 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-config-data\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.540794 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-db-sync-config-data\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.540943 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-combined-ca-bundle\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.541141 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-config-data\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.542374 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-scripts\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.550909 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5s48\" (UniqueName: \"kubernetes.io/projected/f79a7f94-15e1-49ff-a383-0ea07b714dd2-kube-api-access-j5s48\") pod \"cinder-db-sync-4zgn5\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:58 crc kubenswrapper[4861]: I0129 08:08:58.683306 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:08:59 crc kubenswrapper[4861]: I0129 08:08:59.172980 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-4zgn5"] Jan 29 08:08:59 crc kubenswrapper[4861]: I0129 08:08:59.757305 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-4zgn5" event={"ID":"f79a7f94-15e1-49ff-a383-0ea07b714dd2","Type":"ContainerStarted","Data":"50a516dba109433a536773f36436a52ae2cc5dd5c13c5922cf4fe62f145d8849"} Jan 29 08:09:00 crc kubenswrapper[4861]: I0129 08:09:00.629780 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:09:00 crc kubenswrapper[4861]: I0129 08:09:00.630211 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:09:00 crc kubenswrapper[4861]: I0129 08:09:00.766258 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-4zgn5" event={"ID":"f79a7f94-15e1-49ff-a383-0ea07b714dd2","Type":"ContainerStarted","Data":"14acc896d89d6c0dc0f7622d5095819283636e42c74d015ce4b693d5d7e8beeb"} Jan 29 08:09:00 crc kubenswrapper[4861]: I0129 08:09:00.790487 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-4zgn5" podStartSLOduration=2.790468216 podStartE2EDuration="2.790468216s" podCreationTimestamp="2026-01-29 08:08:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:09:00.783638127 +0000 UTC m=+5632.455132694" watchObservedRunningTime="2026-01-29 08:09:00.790468216 +0000 UTC m=+5632.461962773" Jan 29 08:09:02 crc kubenswrapper[4861]: I0129 08:09:02.789265 4861 generic.go:334] "Generic (PLEG): container finished" podID="f79a7f94-15e1-49ff-a383-0ea07b714dd2" containerID="14acc896d89d6c0dc0f7622d5095819283636e42c74d015ce4b693d5d7e8beeb" exitCode=0 Jan 29 08:09:02 crc kubenswrapper[4861]: I0129 08:09:02.789589 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-4zgn5" event={"ID":"f79a7f94-15e1-49ff-a383-0ea07b714dd2","Type":"ContainerDied","Data":"14acc896d89d6c0dc0f7622d5095819283636e42c74d015ce4b693d5d7e8beeb"} Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.214025 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.381209 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f79a7f94-15e1-49ff-a383-0ea07b714dd2-etc-machine-id\") pod \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.381290 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-combined-ca-bundle\") pod \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.381336 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f79a7f94-15e1-49ff-a383-0ea07b714dd2-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f79a7f94-15e1-49ff-a383-0ea07b714dd2" (UID: "f79a7f94-15e1-49ff-a383-0ea07b714dd2"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.381445 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-db-sync-config-data\") pod \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.381527 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-scripts\") pod \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.381609 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5s48\" (UniqueName: \"kubernetes.io/projected/f79a7f94-15e1-49ff-a383-0ea07b714dd2-kube-api-access-j5s48\") pod \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.381695 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-config-data\") pod \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\" (UID: \"f79a7f94-15e1-49ff-a383-0ea07b714dd2\") " Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.382251 4861 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f79a7f94-15e1-49ff-a383-0ea07b714dd2-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.387625 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-scripts" (OuterVolumeSpecName: "scripts") pod "f79a7f94-15e1-49ff-a383-0ea07b714dd2" (UID: "f79a7f94-15e1-49ff-a383-0ea07b714dd2"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.388479 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "f79a7f94-15e1-49ff-a383-0ea07b714dd2" (UID: "f79a7f94-15e1-49ff-a383-0ea07b714dd2"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.390730 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f79a7f94-15e1-49ff-a383-0ea07b714dd2-kube-api-access-j5s48" (OuterVolumeSpecName: "kube-api-access-j5s48") pod "f79a7f94-15e1-49ff-a383-0ea07b714dd2" (UID: "f79a7f94-15e1-49ff-a383-0ea07b714dd2"). InnerVolumeSpecName "kube-api-access-j5s48". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.414256 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f79a7f94-15e1-49ff-a383-0ea07b714dd2" (UID: "f79a7f94-15e1-49ff-a383-0ea07b714dd2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.466861 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-config-data" (OuterVolumeSpecName: "config-data") pod "f79a7f94-15e1-49ff-a383-0ea07b714dd2" (UID: "f79a7f94-15e1-49ff-a383-0ea07b714dd2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.487962 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5s48\" (UniqueName: \"kubernetes.io/projected/f79a7f94-15e1-49ff-a383-0ea07b714dd2-kube-api-access-j5s48\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.488000 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.488013 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.488026 4861 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.488037 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f79a7f94-15e1-49ff-a383-0ea07b714dd2-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.824232 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-4zgn5" event={"ID":"f79a7f94-15e1-49ff-a383-0ea07b714dd2","Type":"ContainerDied","Data":"50a516dba109433a536773f36436a52ae2cc5dd5c13c5922cf4fe62f145d8849"} Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.824281 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50a516dba109433a536773f36436a52ae2cc5dd5c13c5922cf4fe62f145d8849" Jan 29 08:09:04 crc kubenswrapper[4861]: I0129 08:09:04.824344 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-4zgn5" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.148161 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7b5f4c7889-pb9x6"] Jan 29 08:09:05 crc kubenswrapper[4861]: E0129 08:09:05.148741 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f79a7f94-15e1-49ff-a383-0ea07b714dd2" containerName="cinder-db-sync" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.148842 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f79a7f94-15e1-49ff-a383-0ea07b714dd2" containerName="cinder-db-sync" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.149115 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f79a7f94-15e1-49ff-a383-0ea07b714dd2" containerName="cinder-db-sync" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.150038 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.163580 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b5f4c7889-pb9x6"] Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.263237 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.264928 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.268865 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.269260 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-7q89j" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.269492 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.269598 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.282009 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.303549 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-dns-svc\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.303613 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-sb\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.303643 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-config\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.303719 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-nb\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.303859 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj9l4\" (UniqueName: \"kubernetes.io/projected/c4b37722-dbe9-4212-9c67-a28c9adc613c-kube-api-access-tj9l4\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.405002 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttp6b\" (UniqueName: \"kubernetes.io/projected/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-kube-api-access-ttp6b\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.405047 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-nb\") pod 
\"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.405140 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-logs\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.405174 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.405344 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj9l4\" (UniqueName: \"kubernetes.io/projected/c4b37722-dbe9-4212-9c67-a28c9adc613c-kube-api-access-tj9l4\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.405382 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-dns-svc\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.405413 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.405441 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-sb\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.405456 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-scripts\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.405475 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data-custom\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.405494 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-config\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc 
kubenswrapper[4861]: I0129 08:09:05.405520 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.406393 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-dns-svc\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.406467 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-sb\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.406504 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-config\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.406683 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-nb\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.423848 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj9l4\" (UniqueName: \"kubernetes.io/projected/c4b37722-dbe9-4212-9c67-a28c9adc613c-kube-api-access-tj9l4\") pod \"dnsmasq-dns-7b5f4c7889-pb9x6\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.506984 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-scripts\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.507037 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data-custom\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.507070 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.507120 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttp6b\" (UniqueName: 
\"kubernetes.io/projected/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-kube-api-access-ttp6b\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.507180 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-logs\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.507204 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.507252 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.507671 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.508139 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-logs\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.511586 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data-custom\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.512501 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-scripts\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.512788 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.514720 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.516904 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.534613 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttp6b\" (UniqueName: \"kubernetes.io/projected/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-kube-api-access-ttp6b\") pod \"cinder-api-0\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " pod="openstack/cinder-api-0" Jan 29 08:09:05 crc kubenswrapper[4861]: I0129 08:09:05.590237 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 08:09:06 crc kubenswrapper[4861]: I0129 08:09:06.029388 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b5f4c7889-pb9x6"] Jan 29 08:09:06 crc kubenswrapper[4861]: W0129 08:09:06.034853 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4b37722_dbe9_4212_9c67_a28c9adc613c.slice/crio-493cd5ce83dac3d0e51ea9545d3a7ddc35ac2e092adcca546b9b95fc94f11505 WatchSource:0}: Error finding container 493cd5ce83dac3d0e51ea9545d3a7ddc35ac2e092adcca546b9b95fc94f11505: Status 404 returned error can't find the container with id 493cd5ce83dac3d0e51ea9545d3a7ddc35ac2e092adcca546b9b95fc94f11505 Jan 29 08:09:06 crc kubenswrapper[4861]: I0129 08:09:06.130682 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:06 crc kubenswrapper[4861]: W0129 08:09:06.137269 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5f3bd805_4e68_4f31_ade5_c276eb6d5c61.slice/crio-c7620e63c7c5de9607e324aae38d0cabe1232b0bec3a8c7f0f7954a2de93119a WatchSource:0}: Error finding container c7620e63c7c5de9607e324aae38d0cabe1232b0bec3a8c7f0f7954a2de93119a: Status 404 returned error can't find the container with id c7620e63c7c5de9607e324aae38d0cabe1232b0bec3a8c7f0f7954a2de93119a Jan 29 08:09:06 crc kubenswrapper[4861]: I0129 08:09:06.864334 4861 generic.go:334] "Generic (PLEG): container finished" podID="c4b37722-dbe9-4212-9c67-a28c9adc613c" containerID="c1baae1bae0e3bb6047e8af5223777de50c4aab048026e073b0799c1dd9a7a32" exitCode=0 Jan 29 08:09:06 crc kubenswrapper[4861]: I0129 08:09:06.864548 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" event={"ID":"c4b37722-dbe9-4212-9c67-a28c9adc613c","Type":"ContainerDied","Data":"c1baae1bae0e3bb6047e8af5223777de50c4aab048026e073b0799c1dd9a7a32"} Jan 29 08:09:06 crc kubenswrapper[4861]: I0129 08:09:06.864603 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" event={"ID":"c4b37722-dbe9-4212-9c67-a28c9adc613c","Type":"ContainerStarted","Data":"493cd5ce83dac3d0e51ea9545d3a7ddc35ac2e092adcca546b9b95fc94f11505"} Jan 29 08:09:06 crc kubenswrapper[4861]: I0129 08:09:06.878012 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f3bd805-4e68-4f31-ade5-c276eb6d5c61","Type":"ContainerStarted","Data":"eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30"} Jan 29 08:09:06 crc 
kubenswrapper[4861]: I0129 08:09:06.878057 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f3bd805-4e68-4f31-ade5-c276eb6d5c61","Type":"ContainerStarted","Data":"c7620e63c7c5de9607e324aae38d0cabe1232b0bec3a8c7f0f7954a2de93119a"} Jan 29 08:09:07 crc kubenswrapper[4861]: I0129 08:09:07.431393 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:07 crc kubenswrapper[4861]: I0129 08:09:07.887198 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" event={"ID":"c4b37722-dbe9-4212-9c67-a28c9adc613c","Type":"ContainerStarted","Data":"1c7b7e1d08b7053e14c44b368a4bbf8e4b66c58864a6b18bca43de788f35670e"} Jan 29 08:09:07 crc kubenswrapper[4861]: I0129 08:09:07.887796 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:07 crc kubenswrapper[4861]: I0129 08:09:07.889218 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f3bd805-4e68-4f31-ade5-c276eb6d5c61","Type":"ContainerStarted","Data":"276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e"} Jan 29 08:09:07 crc kubenswrapper[4861]: I0129 08:09:07.889982 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 29 08:09:07 crc kubenswrapper[4861]: I0129 08:09:07.909378 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" podStartSLOduration=2.909357356 podStartE2EDuration="2.909357356s" podCreationTimestamp="2026-01-29 08:09:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:09:07.905555617 +0000 UTC m=+5639.577050174" watchObservedRunningTime="2026-01-29 08:09:07.909357356 +0000 UTC m=+5639.580851913" Jan 29 08:09:08 crc kubenswrapper[4861]: I0129 08:09:08.896477 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5f3bd805-4e68-4f31-ade5-c276eb6d5c61" containerName="cinder-api" containerID="cri-o://276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e" gracePeriod=30 Jan 29 08:09:08 crc kubenswrapper[4861]: I0129 08:09:08.896425 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5f3bd805-4e68-4f31-ade5-c276eb6d5c61" containerName="cinder-api-log" containerID="cri-o://eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30" gracePeriod=30 Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.432013 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.621980 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data\") pod \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.622022 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data-custom\") pod \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.622048 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-scripts\") pod \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.622093 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-combined-ca-bundle\") pod \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.622141 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttp6b\" (UniqueName: \"kubernetes.io/projected/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-kube-api-access-ttp6b\") pod \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.622187 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-logs\") pod \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.622281 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-etc-machine-id\") pod \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\" (UID: \"5f3bd805-4e68-4f31-ade5-c276eb6d5c61\") " Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.622768 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5f3bd805-4e68-4f31-ade5-c276eb6d5c61" (UID: "5f3bd805-4e68-4f31-ade5-c276eb6d5c61"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.623022 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-logs" (OuterVolumeSpecName: "logs") pod "5f3bd805-4e68-4f31-ade5-c276eb6d5c61" (UID: "5f3bd805-4e68-4f31-ade5-c276eb6d5c61"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.633321 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-kube-api-access-ttp6b" (OuterVolumeSpecName: "kube-api-access-ttp6b") pod "5f3bd805-4e68-4f31-ade5-c276eb6d5c61" (UID: "5f3bd805-4e68-4f31-ade5-c276eb6d5c61"). InnerVolumeSpecName "kube-api-access-ttp6b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.633410 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-scripts" (OuterVolumeSpecName: "scripts") pod "5f3bd805-4e68-4f31-ade5-c276eb6d5c61" (UID: "5f3bd805-4e68-4f31-ade5-c276eb6d5c61"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.644302 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5f3bd805-4e68-4f31-ade5-c276eb6d5c61" (UID: "5f3bd805-4e68-4f31-ade5-c276eb6d5c61"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.673087 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data" (OuterVolumeSpecName: "config-data") pod "5f3bd805-4e68-4f31-ade5-c276eb6d5c61" (UID: "5f3bd805-4e68-4f31-ade5-c276eb6d5c61"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.675193 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5f3bd805-4e68-4f31-ade5-c276eb6d5c61" (UID: "5f3bd805-4e68-4f31-ade5-c276eb6d5c61"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.724348 4861 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.724382 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.724390 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.724416 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.724425 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.724436 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttp6b\" (UniqueName: \"kubernetes.io/projected/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-kube-api-access-ttp6b\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.724446 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f3bd805-4e68-4f31-ade5-c276eb6d5c61-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.907616 4861 generic.go:334] "Generic (PLEG): container finished" podID="5f3bd805-4e68-4f31-ade5-c276eb6d5c61" containerID="276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e" exitCode=0 Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.907688 4861 generic.go:334] "Generic (PLEG): container finished" podID="5f3bd805-4e68-4f31-ade5-c276eb6d5c61" containerID="eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30" exitCode=143 Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.907689 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.907654 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f3bd805-4e68-4f31-ade5-c276eb6d5c61","Type":"ContainerDied","Data":"276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e"} Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.907815 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f3bd805-4e68-4f31-ade5-c276eb6d5c61","Type":"ContainerDied","Data":"eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30"} Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.907829 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f3bd805-4e68-4f31-ade5-c276eb6d5c61","Type":"ContainerDied","Data":"c7620e63c7c5de9607e324aae38d0cabe1232b0bec3a8c7f0f7954a2de93119a"} Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.907845 4861 scope.go:117] "RemoveContainer" containerID="276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.927811 4861 scope.go:117] "RemoveContainer" containerID="eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.963745 4861 scope.go:117] "RemoveContainer" containerID="276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e" Jan 29 08:09:09 crc kubenswrapper[4861]: E0129 08:09:09.964396 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e\": container with ID starting with 276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e not found: ID does not exist" containerID="276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.964441 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e"} err="failed to get container status \"276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e\": rpc error: code = NotFound desc = could not find container \"276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e\": container with ID starting with 276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e not found: ID does not exist" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.964462 4861 scope.go:117] "RemoveContainer" containerID="eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30" Jan 29 08:09:09 crc kubenswrapper[4861]: E0129 08:09:09.966907 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30\": container with ID starting with eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30 not found: ID does not exist" containerID="eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.967001 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30"} err="failed to get container status \"eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30\": rpc error: code = NotFound desc = 
could not find container \"eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30\": container with ID starting with eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30 not found: ID does not exist" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.967044 4861 scope.go:117] "RemoveContainer" containerID="276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.967402 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e"} err="failed to get container status \"276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e\": rpc error: code = NotFound desc = could not find container \"276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e\": container with ID starting with 276837bf634b03807c69e3fccb2cedfd14978a4f1360fc384eba7b36501ca72e not found: ID does not exist" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.967436 4861 scope.go:117] "RemoveContainer" containerID="eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.967917 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30"} err="failed to get container status \"eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30\": rpc error: code = NotFound desc = could not find container \"eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30\": container with ID starting with eb92c99f5a86416e3cdf233877584685eb55cc865faafc9cf1e2689de2a71c30 not found: ID does not exist" Jan 29 08:09:09 crc kubenswrapper[4861]: I0129 08:09:09.974683 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.000434 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.020251 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:10 crc kubenswrapper[4861]: E0129 08:09:10.020709 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f3bd805-4e68-4f31-ade5-c276eb6d5c61" containerName="cinder-api" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.020727 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f3bd805-4e68-4f31-ade5-c276eb6d5c61" containerName="cinder-api" Jan 29 08:09:10 crc kubenswrapper[4861]: E0129 08:09:10.020757 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f3bd805-4e68-4f31-ade5-c276eb6d5c61" containerName="cinder-api-log" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.020767 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f3bd805-4e68-4f31-ade5-c276eb6d5c61" containerName="cinder-api-log" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.020956 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f3bd805-4e68-4f31-ade5-c276eb6d5c61" containerName="cinder-api-log" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.020989 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f3bd805-4e68-4f31-ade5-c276eb6d5c61" containerName="cinder-api" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.022164 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.026143 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.027153 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.027383 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.027395 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-7q89j" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.028714 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.028901 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.033184 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.130295 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dc1872d-9331-4bb0-883d-9568bf89a107-logs\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.130741 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.130773 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.130821 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8ps5\" (UniqueName: \"kubernetes.io/projected/1dc1872d-9331-4bb0-883d-9568bf89a107-kube-api-access-v8ps5\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.130847 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-scripts\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.130878 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data-custom\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.130895 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-public-tls-certs\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.130937 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.130963 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1dc1872d-9331-4bb0-883d-9568bf89a107-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.232781 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dc1872d-9331-4bb0-883d-9568bf89a107-logs\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.232864 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.232883 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.232944 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8ps5\" (UniqueName: \"kubernetes.io/projected/1dc1872d-9331-4bb0-883d-9568bf89a107-kube-api-access-v8ps5\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.233004 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-scripts\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.233030 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data-custom\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.233055 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-public-tls-certs\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc 
kubenswrapper[4861]: I0129 08:09:10.233107 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.233150 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1dc1872d-9331-4bb0-883d-9568bf89a107-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.234386 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dc1872d-9331-4bb0-883d-9568bf89a107-logs\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.234565 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1dc1872d-9331-4bb0-883d-9568bf89a107-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.237090 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-public-tls-certs\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.237477 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data-custom\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.238798 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.241011 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-scripts\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.243024 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.243621 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.253947 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8ps5\" (UniqueName: \"kubernetes.io/projected/1dc1872d-9331-4bb0-883d-9568bf89a107-kube-api-access-v8ps5\") pod \"cinder-api-0\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.348356 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 08:09:10 crc kubenswrapper[4861]: I0129 08:09:10.910802 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:11 crc kubenswrapper[4861]: I0129 08:09:11.145384 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f3bd805-4e68-4f31-ade5-c276eb6d5c61" path="/var/lib/kubelet/pods/5f3bd805-4e68-4f31-ade5-c276eb6d5c61/volumes" Jan 29 08:09:11 crc kubenswrapper[4861]: I0129 08:09:11.936573 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1dc1872d-9331-4bb0-883d-9568bf89a107","Type":"ContainerStarted","Data":"073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354"} Jan 29 08:09:11 crc kubenswrapper[4861]: I0129 08:09:11.937165 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1dc1872d-9331-4bb0-883d-9568bf89a107","Type":"ContainerStarted","Data":"a53ac11c2c1e71ebe148b91985a4734fc9577024f16b8de44b72ea3b20bcf646"} Jan 29 08:09:12 crc kubenswrapper[4861]: I0129 08:09:12.952723 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1dc1872d-9331-4bb0-883d-9568bf89a107","Type":"ContainerStarted","Data":"6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e"} Jan 29 08:09:12 crc kubenswrapper[4861]: I0129 08:09:12.953275 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 29 08:09:12 crc kubenswrapper[4861]: I0129 08:09:12.999549 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.99951732 podStartE2EDuration="3.99951732s" podCreationTimestamp="2026-01-29 08:09:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:09:12.983652804 +0000 UTC m=+5644.655147441" watchObservedRunningTime="2026-01-29 08:09:12.99951732 +0000 UTC m=+5644.671011917" Jan 29 08:09:15 crc kubenswrapper[4861]: I0129 08:09:15.516380 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:09:15 crc kubenswrapper[4861]: I0129 08:09:15.626837 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75fd975745-6qzgv"] Jan 29 08:09:15 crc kubenswrapper[4861]: I0129 08:09:15.627205 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" podUID="ce9850c6-f728-4e4a-8f65-79edde1e5889" containerName="dnsmasq-dns" containerID="cri-o://d39a03fa46ac2e6d51b0b1a3e8b8e7b806aeeec60b7bede796ec9978104a0360" gracePeriod=10 Jan 29 08:09:15 crc kubenswrapper[4861]: I0129 08:09:15.989382 4861 generic.go:334] "Generic (PLEG): container finished" podID="ce9850c6-f728-4e4a-8f65-79edde1e5889" containerID="d39a03fa46ac2e6d51b0b1a3e8b8e7b806aeeec60b7bede796ec9978104a0360" exitCode=0 Jan 29 08:09:15 crc kubenswrapper[4861]: I0129 08:09:15.989492 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
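
The VerifyControllerAttachedVolume / MountVolume / MountVolume.SetUp progression above is the kubelet's volume reconciler walking each volume from desired state to actual state, one operation at a time. A toy sketch of that desired-vs-actual pattern; the maps and the mount stand-in here are invented for illustration and are not kubelet APIs:

package main

import "fmt"

// reconcile converges actual onto desired: anything desired but not yet
// mounted gets mounted, anything mounted but no longer desired gets
// unmounted, mirroring the reconciler_common.go entries above in spirit.
func reconcile(desired, actual map[string]bool) {
	for vol := range desired {
		if !actual[vol] {
			fmt.Printf("operationExecutor.MountVolume started for volume %q\n", vol)
			actual[vol] = true // stands in for the real mount operation
			fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", vol)
		}
	}
	for vol := range actual {
		if !desired[vol] {
			fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", vol)
			delete(actual, vol)
		}
	}
}

func main() {
	desired := map[string]bool{"logs": true, "config-data": true, "scripts": true}
	actual := map[string]bool{}
	reconcile(desired, actual)
}
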
pod="openstack/dnsmasq-dns-75fd975745-6qzgv" event={"ID":"ce9850c6-f728-4e4a-8f65-79edde1e5889","Type":"ContainerDied","Data":"d39a03fa46ac2e6d51b0b1a3e8b8e7b806aeeec60b7bede796ec9978104a0360"} Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.087044 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.166258 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-dns-svc\") pod \"ce9850c6-f728-4e4a-8f65-79edde1e5889\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.166352 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8fc5\" (UniqueName: \"kubernetes.io/projected/ce9850c6-f728-4e4a-8f65-79edde1e5889-kube-api-access-s8fc5\") pod \"ce9850c6-f728-4e4a-8f65-79edde1e5889\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.166377 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-sb\") pod \"ce9850c6-f728-4e4a-8f65-79edde1e5889\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.166446 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-nb\") pod \"ce9850c6-f728-4e4a-8f65-79edde1e5889\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.166480 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-config\") pod \"ce9850c6-f728-4e4a-8f65-79edde1e5889\" (UID: \"ce9850c6-f728-4e4a-8f65-79edde1e5889\") " Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.183122 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce9850c6-f728-4e4a-8f65-79edde1e5889-kube-api-access-s8fc5" (OuterVolumeSpecName: "kube-api-access-s8fc5") pod "ce9850c6-f728-4e4a-8f65-79edde1e5889" (UID: "ce9850c6-f728-4e4a-8f65-79edde1e5889"). InnerVolumeSpecName "kube-api-access-s8fc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.222953 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ce9850c6-f728-4e4a-8f65-79edde1e5889" (UID: "ce9850c6-f728-4e4a-8f65-79edde1e5889"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.224847 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ce9850c6-f728-4e4a-8f65-79edde1e5889" (UID: "ce9850c6-f728-4e4a-8f65-79edde1e5889"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.226564 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ce9850c6-f728-4e4a-8f65-79edde1e5889" (UID: "ce9850c6-f728-4e4a-8f65-79edde1e5889"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.228149 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-config" (OuterVolumeSpecName: "config") pod "ce9850c6-f728-4e4a-8f65-79edde1e5889" (UID: "ce9850c6-f728-4e4a-8f65-79edde1e5889"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.268208 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.268239 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.268249 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8fc5\" (UniqueName: \"kubernetes.io/projected/ce9850c6-f728-4e4a-8f65-79edde1e5889-kube-api-access-s8fc5\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.268259 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:16 crc kubenswrapper[4861]: I0129 08:09:16.268267 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ce9850c6-f728-4e4a-8f65-79edde1e5889-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:17 crc kubenswrapper[4861]: I0129 08:09:17.007047 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" event={"ID":"ce9850c6-f728-4e4a-8f65-79edde1e5889","Type":"ContainerDied","Data":"14f8dde3d090c884f340c06473b0e27ec2fe6aab36160dea7ed8ca9cdea41ca1"} Jan 29 08:09:17 crc kubenswrapper[4861]: I0129 08:09:17.007338 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75fd975745-6qzgv" Jan 29 08:09:17 crc kubenswrapper[4861]: I0129 08:09:17.007543 4861 scope.go:117] "RemoveContainer" containerID="d39a03fa46ac2e6d51b0b1a3e8b8e7b806aeeec60b7bede796ec9978104a0360" Jan 29 08:09:17 crc kubenswrapper[4861]: I0129 08:09:17.050650 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75fd975745-6qzgv"] Jan 29 08:09:17 crc kubenswrapper[4861]: I0129 08:09:17.054028 4861 scope.go:117] "RemoveContainer" containerID="375ff8ec1f3a9043eab722981545c37818872d305af322159deeb58d8fb612be" Jan 29 08:09:17 crc kubenswrapper[4861]: I0129 08:09:17.058763 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75fd975745-6qzgv"] Jan 29 08:09:17 crc kubenswrapper[4861]: I0129 08:09:17.135803 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce9850c6-f728-4e4a-8f65-79edde1e5889" path="/var/lib/kubelet/pods/ce9850c6-f728-4e4a-8f65-79edde1e5889/volumes" Jan 29 08:09:22 crc kubenswrapper[4861]: I0129 08:09:22.113033 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 29 08:09:26 crc kubenswrapper[4861]: I0129 08:09:26.084495 4861 scope.go:117] "RemoveContainer" containerID="74cfa304a062c06376023b0dc73208636f05cb2d84b31845ae51644115b57b51" Jan 29 08:09:30 crc kubenswrapper[4861]: I0129 08:09:30.630116 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:09:30 crc kubenswrapper[4861]: I0129 08:09:30.631165 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.275546 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 08:09:39 crc kubenswrapper[4861]: E0129 08:09:39.276276 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce9850c6-f728-4e4a-8f65-79edde1e5889" containerName="init" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.276288 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce9850c6-f728-4e4a-8f65-79edde1e5889" containerName="init" Jan 29 08:09:39 crc kubenswrapper[4861]: E0129 08:09:39.276302 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce9850c6-f728-4e4a-8f65-79edde1e5889" containerName="dnsmasq-dns" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.276307 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce9850c6-f728-4e4a-8f65-79edde1e5889" containerName="dnsmasq-dns" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.276457 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce9850c6-f728-4e4a-8f65-79edde1e5889" containerName="dnsmasq-dns" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.277264 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.280395 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.346643 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.368674 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/16eb41f3-e2b7-4b10-94f4-876f35da0258-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.368733 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.368922 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.369130 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrgn2\" (UniqueName: \"kubernetes.io/projected/16eb41f3-e2b7-4b10-94f4-876f35da0258-kube-api-access-xrgn2\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.369289 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.369401 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-scripts\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.470434 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.470498 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrgn2\" (UniqueName: \"kubernetes.io/projected/16eb41f3-e2b7-4b10-94f4-876f35da0258-kube-api-access-xrgn2\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.470533 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.470561 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-scripts\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.470618 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/16eb41f3-e2b7-4b10-94f4-876f35da0258-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.470641 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.470735 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/16eb41f3-e2b7-4b10-94f4-876f35da0258-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.477529 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.478702 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.480973 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.488650 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-scripts\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 crc kubenswrapper[4861]: I0129 08:09:39.496315 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrgn2\" (UniqueName: \"kubernetes.io/projected/16eb41f3-e2b7-4b10-94f4-876f35da0258-kube-api-access-xrgn2\") pod \"cinder-scheduler-0\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:39 
crc kubenswrapper[4861]: I0129 08:09:39.593101 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 08:09:40 crc kubenswrapper[4861]: I0129 08:09:40.092164 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 08:09:40 crc kubenswrapper[4861]: I0129 08:09:40.235546 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"16eb41f3-e2b7-4b10-94f4-876f35da0258","Type":"ContainerStarted","Data":"af8c367907411bff1bd63a89a787489051676cb0e3de022eba31a02f4ae5733d"} Jan 29 08:09:40 crc kubenswrapper[4861]: I0129 08:09:40.552301 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:40 crc kubenswrapper[4861]: I0129 08:09:40.552554 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1dc1872d-9331-4bb0-883d-9568bf89a107" containerName="cinder-api-log" containerID="cri-o://073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354" gracePeriod=30 Jan 29 08:09:40 crc kubenswrapper[4861]: I0129 08:09:40.552660 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1dc1872d-9331-4bb0-883d-9568bf89a107" containerName="cinder-api" containerID="cri-o://6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e" gracePeriod=30 Jan 29 08:09:41 crc kubenswrapper[4861]: I0129 08:09:41.249091 4861 generic.go:334] "Generic (PLEG): container finished" podID="1dc1872d-9331-4bb0-883d-9568bf89a107" containerID="073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354" exitCode=143 Jan 29 08:09:41 crc kubenswrapper[4861]: I0129 08:09:41.249402 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1dc1872d-9331-4bb0-883d-9568bf89a107","Type":"ContainerDied","Data":"073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354"} Jan 29 08:09:41 crc kubenswrapper[4861]: I0129 08:09:41.252472 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"16eb41f3-e2b7-4b10-94f4-876f35da0258","Type":"ContainerStarted","Data":"71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb"} Jan 29 08:09:42 crc kubenswrapper[4861]: I0129 08:09:42.263278 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"16eb41f3-e2b7-4b10-94f4-876f35da0258","Type":"ContainerStarted","Data":"7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3"} Jan 29 08:09:42 crc kubenswrapper[4861]: I0129 08:09:42.296677 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.296655341 podStartE2EDuration="3.296655341s" podCreationTimestamp="2026-01-29 08:09:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:09:42.295051869 +0000 UTC m=+5673.966546496" watchObservedRunningTime="2026-01-29 08:09:42.296655341 +0000 UTC m=+5673.968149928" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.208299 4861 util.go:48] "No ready sandbox for pod can be found. 
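
The exitCode=143 above is the usual signature of a graceful stop: 128 plus SIGTERM (15), meaning cinder-api-log exited on the termination signal within its 30-second grace period, whereas exitCode=0 marks a clean self-initiated exit. A one-function sketch of that 128+N convention, assuming POSIX signal numbering:

package main

import (
	"fmt"
	"syscall"
)

// signalFromExitCode recovers the terminating signal from the common
// 128+N convention used for container exit codes.
func signalFromExitCode(code int) (syscall.Signal, bool) {
	if code > 128 {
		return syscall.Signal(code - 128), true
	}
	return 0, false
}

func main() {
	if sig, ok := signalFromExitCode(143); ok {
		fmt.Printf("exit code 143 => killed by signal %d (%s)\n", sig, sig) // SIGTERM
	}
}
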
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.288127 4861 generic.go:334] "Generic (PLEG): container finished" podID="1dc1872d-9331-4bb0-883d-9568bf89a107" containerID="6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e" exitCode=0 Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.288178 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1dc1872d-9331-4bb0-883d-9568bf89a107","Type":"ContainerDied","Data":"6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e"} Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.288210 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1dc1872d-9331-4bb0-883d-9568bf89a107","Type":"ContainerDied","Data":"a53ac11c2c1e71ebe148b91985a4734fc9577024f16b8de44b72ea3b20bcf646"} Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.288233 4861 scope.go:117] "RemoveContainer" containerID="6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.288272 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.318052 4861 scope.go:117] "RemoveContainer" containerID="073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.338492 4861 scope.go:117] "RemoveContainer" containerID="6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e" Jan 29 08:09:44 crc kubenswrapper[4861]: E0129 08:09:44.340050 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e\": container with ID starting with 6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e not found: ID does not exist" containerID="6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.340198 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e"} err="failed to get container status \"6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e\": rpc error: code = NotFound desc = could not find container \"6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e\": container with ID starting with 6163b2de0259d16849f391fd7b46ad5b907431aaf899ae8d050bd7b27760672e not found: ID does not exist" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.340232 4861 scope.go:117] "RemoveContainer" containerID="073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354" Jan 29 08:09:44 crc kubenswrapper[4861]: E0129 08:09:44.341151 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354\": container with ID starting with 073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354 not found: ID does not exist" containerID="073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.341193 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354"} 
err="failed to get container status \"073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354\": rpc error: code = NotFound desc = could not find container \"073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354\": container with ID starting with 073ba1b8c0355104ad4bc08125c635333978196e548f585ca40a97bd2b1af354 not found: ID does not exist" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.371091 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-internal-tls-certs\") pod \"1dc1872d-9331-4bb0-883d-9568bf89a107\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.371182 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-combined-ca-bundle\") pod \"1dc1872d-9331-4bb0-883d-9568bf89a107\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.371252 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dc1872d-9331-4bb0-883d-9568bf89a107-logs\") pod \"1dc1872d-9331-4bb0-883d-9568bf89a107\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.371314 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1dc1872d-9331-4bb0-883d-9568bf89a107-etc-machine-id\") pod \"1dc1872d-9331-4bb0-883d-9568bf89a107\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.371338 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-scripts\") pod \"1dc1872d-9331-4bb0-883d-9568bf89a107\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.371362 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data-custom\") pod \"1dc1872d-9331-4bb0-883d-9568bf89a107\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.371389 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-public-tls-certs\") pod \"1dc1872d-9331-4bb0-883d-9568bf89a107\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.371435 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data\") pod \"1dc1872d-9331-4bb0-883d-9568bf89a107\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.371490 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8ps5\" (UniqueName: \"kubernetes.io/projected/1dc1872d-9331-4bb0-883d-9568bf89a107-kube-api-access-v8ps5\") pod \"1dc1872d-9331-4bb0-883d-9568bf89a107\" (UID: \"1dc1872d-9331-4bb0-883d-9568bf89a107\") " Jan 29 08:09:44 crc 
kubenswrapper[4861]: I0129 08:09:44.372513 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1dc1872d-9331-4bb0-883d-9568bf89a107-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1dc1872d-9331-4bb0-883d-9568bf89a107" (UID: "1dc1872d-9331-4bb0-883d-9568bf89a107"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.375717 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1dc1872d-9331-4bb0-883d-9568bf89a107-logs" (OuterVolumeSpecName: "logs") pod "1dc1872d-9331-4bb0-883d-9568bf89a107" (UID: "1dc1872d-9331-4bb0-883d-9568bf89a107"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.377889 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1dc1872d-9331-4bb0-883d-9568bf89a107-kube-api-access-v8ps5" (OuterVolumeSpecName: "kube-api-access-v8ps5") pod "1dc1872d-9331-4bb0-883d-9568bf89a107" (UID: "1dc1872d-9331-4bb0-883d-9568bf89a107"). InnerVolumeSpecName "kube-api-access-v8ps5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.379399 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1dc1872d-9331-4bb0-883d-9568bf89a107" (UID: "1dc1872d-9331-4bb0-883d-9568bf89a107"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.398445 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-scripts" (OuterVolumeSpecName: "scripts") pod "1dc1872d-9331-4bb0-883d-9568bf89a107" (UID: "1dc1872d-9331-4bb0-883d-9568bf89a107"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.403739 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1dc1872d-9331-4bb0-883d-9568bf89a107" (UID: "1dc1872d-9331-4bb0-883d-9568bf89a107"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.424900 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data" (OuterVolumeSpecName: "config-data") pod "1dc1872d-9331-4bb0-883d-9568bf89a107" (UID: "1dc1872d-9331-4bb0-883d-9568bf89a107"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.432620 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1dc1872d-9331-4bb0-883d-9568bf89a107" (UID: "1dc1872d-9331-4bb0-883d-9568bf89a107"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.433828 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "1dc1872d-9331-4bb0-883d-9568bf89a107" (UID: "1dc1872d-9331-4bb0-883d-9568bf89a107"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.475271 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dc1872d-9331-4bb0-883d-9568bf89a107-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.475312 4861 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1dc1872d-9331-4bb0-883d-9568bf89a107-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.475329 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.475341 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.475366 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.475376 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.475386 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8ps5\" (UniqueName: \"kubernetes.io/projected/1dc1872d-9331-4bb0-883d-9568bf89a107-kube-api-access-v8ps5\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.475397 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.475409 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dc1872d-9331-4bb0-883d-9568bf89a107-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.593755 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.623357 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.630432 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.644357 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 29 08:09:44 crc kubenswrapper[4861]: 
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.644701 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dc1872d-9331-4bb0-883d-9568bf89a107" containerName="cinder-api"
Jan 29 08:09:44 crc kubenswrapper[4861]: E0129 08:09:44.644733 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dc1872d-9331-4bb0-883d-9568bf89a107" containerName="cinder-api-log"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.644740 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dc1872d-9331-4bb0-883d-9568bf89a107" containerName="cinder-api-log"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.644889 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dc1872d-9331-4bb0-883d-9568bf89a107" containerName="cinder-api"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.644905 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dc1872d-9331-4bb0-883d-9568bf89a107" containerName="cinder-api-log"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.645850 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.648013 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.695406 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.696289 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.703089 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.799424 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.799502 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-logs\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.799532 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-public-tls-certs\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.799735 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fsvz\" (UniqueName: \"kubernetes.io/projected/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-kube-api-access-2fsvz\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.799811 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.799849 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-config-data\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.799898 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-config-data-custom\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.799980 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-scripts\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.800383 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.901969 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-logs\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.902035 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-public-tls-certs\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.902097 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fsvz\" (UniqueName: \"kubernetes.io/projected/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-kube-api-access-2fsvz\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.902153 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.902232 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-config-data\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.902283 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-config-data-custom\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.902385 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-scripts\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.902449 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.902558 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.902812 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.902949 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-logs\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.906307 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.906636 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-public-tls-certs\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.908131 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-config-data\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.908223 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-config-data-custom\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.909121 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-scripts\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.911461 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:44 crc kubenswrapper[4861]: I0129 08:09:44.917499 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fsvz\" (UniqueName: \"kubernetes.io/projected/1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1-kube-api-access-2fsvz\") pod \"cinder-api-0\" (UID: \"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1\") " pod="openstack/cinder-api-0"
Jan 29 08:09:45 crc kubenswrapper[4861]: I0129 08:09:45.005242 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 29 08:09:45 crc kubenswrapper[4861]: I0129 08:09:45.129354 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1dc1872d-9331-4bb0-883d-9568bf89a107" path="/var/lib/kubelet/pods/1dc1872d-9331-4bb0-883d-9568bf89a107/volumes"
Jan 29 08:09:45 crc kubenswrapper[4861]: I0129 08:09:45.533813 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 29 08:09:46 crc kubenswrapper[4861]: I0129 08:09:46.310098 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1","Type":"ContainerStarted","Data":"b251b8b96abd5cf589ebee1d2f37fcdbc0d03d4fc4d3e17f917f09a15254cbd9"}
Jan 29 08:09:46 crc kubenswrapper[4861]: I0129 08:09:46.311004 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1","Type":"ContainerStarted","Data":"e24bbcb0aca55cd19bd7447fbaad897a829a58ed20320f84c06795763d0044e5"}
Jan 29 08:09:47 crc kubenswrapper[4861]: I0129 08:09:47.324046 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1","Type":"ContainerStarted","Data":"fb5b924c365e01dadef1af296dd4d651fe9e925554728c457cfb8c64e8c20353"}
Jan 29 08:09:47 crc kubenswrapper[4861]: I0129 08:09:47.324390 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Jan 29 08:09:47 crc kubenswrapper[4861]: I0129 08:09:47.351021 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.350993635 podStartE2EDuration="3.350993635s" podCreationTimestamp="2026-01-29 08:09:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:09:47.344891045 +0000 UTC m=+5679.016385692" watchObservedRunningTime="2026-01-29 08:09:47.350993635 +0000 UTC m=+5679.022488232"
Jan 29 08:09:49 crc kubenswrapper[4861]: I0129 08:09:49.793846 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Jan 29 08:09:49 crc kubenswrapper[4861]: I0129 08:09:49.853702 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 29 08:09:50 crc kubenswrapper[4861]: I0129 08:09:50.357336 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="16eb41f3-e2b7-4b10-94f4-876f35da0258" containerName="cinder-scheduler" containerID="cri-o://71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb" gracePeriod=30
kubenswrapper[4861]: I0129 08:09:50.357336 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="16eb41f3-e2b7-4b10-94f4-876f35da0258" containerName="cinder-scheduler" containerID="cri-o://71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb" gracePeriod=30 Jan 29 08:09:50 crc kubenswrapper[4861]: I0129 08:09:50.357423 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="16eb41f3-e2b7-4b10-94f4-876f35da0258" containerName="probe" containerID="cri-o://7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3" gracePeriod=30 Jan 29 08:09:51 crc kubenswrapper[4861]: I0129 08:09:51.375859 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"16eb41f3-e2b7-4b10-94f4-876f35da0258","Type":"ContainerDied","Data":"7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3"} Jan 29 08:09:51 crc kubenswrapper[4861]: I0129 08:09:51.375813 4861 generic.go:334] "Generic (PLEG): container finished" podID="16eb41f3-e2b7-4b10-94f4-876f35da0258" containerID="7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3" exitCode=0 Jan 29 08:09:51 crc kubenswrapper[4861]: I0129 08:09:51.947911 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.046065 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrgn2\" (UniqueName: \"kubernetes.io/projected/16eb41f3-e2b7-4b10-94f4-876f35da0258-kube-api-access-xrgn2\") pod \"16eb41f3-e2b7-4b10-94f4-876f35da0258\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.046169 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-combined-ca-bundle\") pod \"16eb41f3-e2b7-4b10-94f4-876f35da0258\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.046247 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data\") pod \"16eb41f3-e2b7-4b10-94f4-876f35da0258\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.046455 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/16eb41f3-e2b7-4b10-94f4-876f35da0258-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "16eb41f3-e2b7-4b10-94f4-876f35da0258" (UID: "16eb41f3-e2b7-4b10-94f4-876f35da0258"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.046313 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/16eb41f3-e2b7-4b10-94f4-876f35da0258-etc-machine-id\") pod \"16eb41f3-e2b7-4b10-94f4-876f35da0258\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.047159 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-scripts\") pod \"16eb41f3-e2b7-4b10-94f4-876f35da0258\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.047192 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data-custom\") pod \"16eb41f3-e2b7-4b10-94f4-876f35da0258\" (UID: \"16eb41f3-e2b7-4b10-94f4-876f35da0258\") " Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.047620 4861 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/16eb41f3-e2b7-4b10-94f4-876f35da0258-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.053249 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16eb41f3-e2b7-4b10-94f4-876f35da0258-kube-api-access-xrgn2" (OuterVolumeSpecName: "kube-api-access-xrgn2") pod "16eb41f3-e2b7-4b10-94f4-876f35da0258" (UID: "16eb41f3-e2b7-4b10-94f4-876f35da0258"). InnerVolumeSpecName "kube-api-access-xrgn2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.055277 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-scripts" (OuterVolumeSpecName: "scripts") pod "16eb41f3-e2b7-4b10-94f4-876f35da0258" (UID: "16eb41f3-e2b7-4b10-94f4-876f35da0258"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.055747 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "16eb41f3-e2b7-4b10-94f4-876f35da0258" (UID: "16eb41f3-e2b7-4b10-94f4-876f35da0258"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.097720 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "16eb41f3-e2b7-4b10-94f4-876f35da0258" (UID: "16eb41f3-e2b7-4b10-94f4-876f35da0258"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.142434 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data" (OuterVolumeSpecName: "config-data") pod "16eb41f3-e2b7-4b10-94f4-876f35da0258" (UID: "16eb41f3-e2b7-4b10-94f4-876f35da0258"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.149480 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.149685 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.149763 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrgn2\" (UniqueName: \"kubernetes.io/projected/16eb41f3-e2b7-4b10-94f4-876f35da0258-kube-api-access-xrgn2\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.149826 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.149887 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16eb41f3-e2b7-4b10-94f4-876f35da0258-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.384663 4861 generic.go:334] "Generic (PLEG): container finished" podID="16eb41f3-e2b7-4b10-94f4-876f35da0258" containerID="71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb" exitCode=0 Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.384699 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"16eb41f3-e2b7-4b10-94f4-876f35da0258","Type":"ContainerDied","Data":"71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb"} Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.384722 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"16eb41f3-e2b7-4b10-94f4-876f35da0258","Type":"ContainerDied","Data":"af8c367907411bff1bd63a89a787489051676cb0e3de022eba31a02f4ae5733d"} Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.384738 4861 scope.go:117] "RemoveContainer" containerID="7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.384837 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.412187 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.419283 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.419454 4861 scope.go:117] "RemoveContainer" containerID="71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.441326 4861 scope.go:117] "RemoveContainer" containerID="7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3" Jan 29 08:09:52 crc kubenswrapper[4861]: E0129 08:09:52.441846 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3\": container with ID starting with 7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3 not found: ID does not exist" containerID="7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.441889 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3"} err="failed to get container status \"7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3\": rpc error: code = NotFound desc = could not find container \"7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3\": container with ID starting with 7c060d715b71b8b3625b02a49251237dc3e7a2c7be01ffd2281745b47664ddf3 not found: ID does not exist" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.441912 4861 scope.go:117] "RemoveContainer" containerID="71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb" Jan 29 08:09:52 crc kubenswrapper[4861]: E0129 08:09:52.442329 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb\": container with ID starting with 71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb not found: ID does not exist" containerID="71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.442456 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb"} err="failed to get container status \"71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb\": rpc error: code = NotFound desc = could not find container \"71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb\": container with ID starting with 71669f7cf914a275968b1b01b7daa84f8d9e4dc0e2b4e0e18ebabb422bf415fb not found: ID does not exist" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.445956 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 08:09:52 crc kubenswrapper[4861]: E0129 08:09:52.446501 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16eb41f3-e2b7-4b10-94f4-876f35da0258" containerName="probe" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.446637 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="16eb41f3-e2b7-4b10-94f4-876f35da0258" containerName="probe" Jan 
29 08:09:52 crc kubenswrapper[4861]: E0129 08:09:52.446757 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16eb41f3-e2b7-4b10-94f4-876f35da0258" containerName="cinder-scheduler" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.446827 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="16eb41f3-e2b7-4b10-94f4-876f35da0258" containerName="cinder-scheduler" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.447198 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="16eb41f3-e2b7-4b10-94f4-876f35da0258" containerName="probe" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.447309 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="16eb41f3-e2b7-4b10-94f4-876f35da0258" containerName="cinder-scheduler" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.448305 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.451763 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.454063 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.557130 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-config-data\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.557399 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.557575 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.557705 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-scripts\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.557812 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csscw\" (UniqueName: \"kubernetes.io/projected/0139c78d-25f6-42a2-be1f-49a40decaaae-kube-api-access-csscw\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.557936 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0139c78d-25f6-42a2-be1f-49a40decaaae-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: 
\"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.658872 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.658947 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.658997 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csscw\" (UniqueName: \"kubernetes.io/projected/0139c78d-25f6-42a2-be1f-49a40decaaae-kube-api-access-csscw\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.659016 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-scripts\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.659059 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0139c78d-25f6-42a2-be1f-49a40decaaae-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.659198 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-config-data\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.659535 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0139c78d-25f6-42a2-be1f-49a40decaaae-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.665424 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-config-data\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.666234 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-scripts\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.668780 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.669788 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0139c78d-25f6-42a2-be1f-49a40decaaae-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.676644 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csscw\" (UniqueName: \"kubernetes.io/projected/0139c78d-25f6-42a2-be1f-49a40decaaae-kube-api-access-csscw\") pod \"cinder-scheduler-0\" (UID: \"0139c78d-25f6-42a2-be1f-49a40decaaae\") " pod="openstack/cinder-scheduler-0" Jan 29 08:09:52 crc kubenswrapper[4861]: I0129 08:09:52.771872 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 08:09:53 crc kubenswrapper[4861]: I0129 08:09:53.058701 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 08:09:53 crc kubenswrapper[4861]: I0129 08:09:53.128679 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16eb41f3-e2b7-4b10-94f4-876f35da0258" path="/var/lib/kubelet/pods/16eb41f3-e2b7-4b10-94f4-876f35da0258/volumes" Jan 29 08:09:53 crc kubenswrapper[4861]: I0129 08:09:53.398784 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0139c78d-25f6-42a2-be1f-49a40decaaae","Type":"ContainerStarted","Data":"876052f1094ef29ca15a1ebecb4a48820630ff7fd97cc87e337ddd1a98730545"} Jan 29 08:09:54 crc kubenswrapper[4861]: I0129 08:09:54.456912 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0139c78d-25f6-42a2-be1f-49a40decaaae","Type":"ContainerStarted","Data":"865d1906a899947fad67f99d625a6ac0d65258055846d4360a8416d3f92ca6a5"} Jan 29 08:09:54 crc kubenswrapper[4861]: I0129 08:09:54.457240 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0139c78d-25f6-42a2-be1f-49a40decaaae","Type":"ContainerStarted","Data":"36f5712c1169f000e19185d8c38b90030a88bdfc10065bbeda53d031cd05128a"} Jan 29 08:09:54 crc kubenswrapper[4861]: I0129 08:09:54.482066 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=2.482038704 podStartE2EDuration="2.482038704s" podCreationTimestamp="2026-01-29 08:09:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:09:54.478139242 +0000 UTC m=+5686.149633809" watchObservedRunningTime="2026-01-29 08:09:54.482038704 +0000 UTC m=+5686.153533271" Jan 29 08:09:56 crc kubenswrapper[4861]: I0129 08:09:56.903513 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 29 08:09:57 crc kubenswrapper[4861]: I0129 08:09:57.773002 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 29 08:10:00 crc kubenswrapper[4861]: I0129 08:10:00.630377 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:10:00 crc kubenswrapper[4861]: I0129 08:10:00.630765 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:10:00 crc kubenswrapper[4861]: I0129 08:10:00.630831 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 08:10:00 crc kubenswrapper[4861]: I0129 08:10:00.631920 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 08:10:00 crc kubenswrapper[4861]: I0129 08:10:00.632037 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" gracePeriod=600 Jan 29 08:10:00 crc kubenswrapper[4861]: E0129 08:10:00.771963 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:10:01 crc kubenswrapper[4861]: I0129 08:10:01.531536 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" exitCode=0 Jan 29 08:10:01 crc kubenswrapper[4861]: I0129 08:10:01.531613 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa"} Jan 29 08:10:01 crc kubenswrapper[4861]: I0129 08:10:01.531704 4861 scope.go:117] "RemoveContainer" containerID="f12c7cd3a0871c191ad7ec4bd142001b746849f696737470eed8fe923ec11fff" Jan 29 08:10:01 crc kubenswrapper[4861]: I0129 08:10:01.532637 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:10:01 crc kubenswrapper[4861]: E0129 08:10:01.533142 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:10:02 crc kubenswrapper[4861]: I0129 08:10:02.985049 
4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.514456 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-zt52c"] Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.516290 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zt52c" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.535371 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-zt52c"] Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.616715 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-9082-account-create-update-82jff"] Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.617864 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-9082-account-create-update-82jff" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.621080 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.627038 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-9082-account-create-update-82jff"] Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.664872 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24h4l\" (UniqueName: \"kubernetes.io/projected/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-kube-api-access-24h4l\") pod \"glance-db-create-zt52c\" (UID: \"36d0e0cc-132a-48ad-83cc-15c5ffda48e1\") " pod="openstack/glance-db-create-zt52c" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.665589 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-operator-scripts\") pod \"glance-db-create-zt52c\" (UID: \"36d0e0cc-132a-48ad-83cc-15c5ffda48e1\") " pod="openstack/glance-db-create-zt52c" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.767527 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24h4l\" (UniqueName: \"kubernetes.io/projected/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-kube-api-access-24h4l\") pod \"glance-db-create-zt52c\" (UID: \"36d0e0cc-132a-48ad-83cc-15c5ffda48e1\") " pod="openstack/glance-db-create-zt52c" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.767622 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnmwc\" (UniqueName: \"kubernetes.io/projected/b9c11f12-732d-470e-b8fa-29cbf4d977fb-kube-api-access-rnmwc\") pod \"glance-9082-account-create-update-82jff\" (UID: \"b9c11f12-732d-470e-b8fa-29cbf4d977fb\") " pod="openstack/glance-9082-account-create-update-82jff" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.767726 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9c11f12-732d-470e-b8fa-29cbf4d977fb-operator-scripts\") pod \"glance-9082-account-create-update-82jff\" (UID: \"b9c11f12-732d-470e-b8fa-29cbf4d977fb\") " pod="openstack/glance-9082-account-create-update-82jff" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.767754 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-operator-scripts\") pod \"glance-db-create-zt52c\" (UID: \"36d0e0cc-132a-48ad-83cc-15c5ffda48e1\") " pod="openstack/glance-db-create-zt52c" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.768433 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-operator-scripts\") pod \"glance-db-create-zt52c\" (UID: \"36d0e0cc-132a-48ad-83cc-15c5ffda48e1\") " pod="openstack/glance-db-create-zt52c" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.809896 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24h4l\" (UniqueName: \"kubernetes.io/projected/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-kube-api-access-24h4l\") pod \"glance-db-create-zt52c\" (UID: \"36d0e0cc-132a-48ad-83cc-15c5ffda48e1\") " pod="openstack/glance-db-create-zt52c" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.869164 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9c11f12-732d-470e-b8fa-29cbf4d977fb-operator-scripts\") pod \"glance-9082-account-create-update-82jff\" (UID: \"b9c11f12-732d-470e-b8fa-29cbf4d977fb\") " pod="openstack/glance-9082-account-create-update-82jff" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.869544 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnmwc\" (UniqueName: \"kubernetes.io/projected/b9c11f12-732d-470e-b8fa-29cbf4d977fb-kube-api-access-rnmwc\") pod \"glance-9082-account-create-update-82jff\" (UID: \"b9c11f12-732d-470e-b8fa-29cbf4d977fb\") " pod="openstack/glance-9082-account-create-update-82jff" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.869877 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9c11f12-732d-470e-b8fa-29cbf4d977fb-operator-scripts\") pod \"glance-9082-account-create-update-82jff\" (UID: \"b9c11f12-732d-470e-b8fa-29cbf4d977fb\") " pod="openstack/glance-9082-account-create-update-82jff" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.884816 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnmwc\" (UniqueName: \"kubernetes.io/projected/b9c11f12-732d-470e-b8fa-29cbf4d977fb-kube-api-access-rnmwc\") pod \"glance-9082-account-create-update-82jff\" (UID: \"b9c11f12-732d-470e-b8fa-29cbf4d977fb\") " pod="openstack/glance-9082-account-create-update-82jff" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.888893 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zt52c" Jan 29 08:10:03 crc kubenswrapper[4861]: I0129 08:10:03.937595 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-9082-account-create-update-82jff" Jan 29 08:10:04 crc kubenswrapper[4861]: I0129 08:10:04.329220 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-zt52c"] Jan 29 08:10:04 crc kubenswrapper[4861]: W0129 08:10:04.335829 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36d0e0cc_132a_48ad_83cc_15c5ffda48e1.slice/crio-e6eae762062c61f7916a7333b634dafbaa27b6794c29d30e7c972becb2f5ba9e WatchSource:0}: Error finding container e6eae762062c61f7916a7333b634dafbaa27b6794c29d30e7c972becb2f5ba9e: Status 404 returned error can't find the container with id e6eae762062c61f7916a7333b634dafbaa27b6794c29d30e7c972becb2f5ba9e Jan 29 08:10:04 crc kubenswrapper[4861]: I0129 08:10:04.393215 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-9082-account-create-update-82jff"] Jan 29 08:10:04 crc kubenswrapper[4861]: I0129 08:10:04.584617 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-9082-account-create-update-82jff" event={"ID":"b9c11f12-732d-470e-b8fa-29cbf4d977fb","Type":"ContainerStarted","Data":"441c2ee0f6d03a6d2320f4d04b8cccd02ea0e54d69bda0b008f651022bf5bf66"} Jan 29 08:10:04 crc kubenswrapper[4861]: I0129 08:10:04.584944 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-9082-account-create-update-82jff" event={"ID":"b9c11f12-732d-470e-b8fa-29cbf4d977fb","Type":"ContainerStarted","Data":"472b8d631c7f88f476207b98a12993b177375b77d90cec4c0ff2909dc64aacc8"} Jan 29 08:10:04 crc kubenswrapper[4861]: I0129 08:10:04.586612 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zt52c" event={"ID":"36d0e0cc-132a-48ad-83cc-15c5ffda48e1","Type":"ContainerStarted","Data":"c52e6c2544058ee6c149c5574f3bd6f297f8a376a4a929bddd0eb95ffe91fd29"} Jan 29 08:10:04 crc kubenswrapper[4861]: I0129 08:10:04.586643 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zt52c" event={"ID":"36d0e0cc-132a-48ad-83cc-15c5ffda48e1","Type":"ContainerStarted","Data":"e6eae762062c61f7916a7333b634dafbaa27b6794c29d30e7c972becb2f5ba9e"} Jan 29 08:10:04 crc kubenswrapper[4861]: I0129 08:10:04.602792 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-9082-account-create-update-82jff" podStartSLOduration=1.60277642 podStartE2EDuration="1.60277642s" podCreationTimestamp="2026-01-29 08:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:10:04.597710587 +0000 UTC m=+5696.269205144" watchObservedRunningTime="2026-01-29 08:10:04.60277642 +0000 UTC m=+5696.274270977" Jan 29 08:10:04 crc kubenswrapper[4861]: I0129 08:10:04.618397 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-zt52c" podStartSLOduration=1.618377528 podStartE2EDuration="1.618377528s" podCreationTimestamp="2026-01-29 08:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:10:04.616604062 +0000 UTC m=+5696.288098629" watchObservedRunningTime="2026-01-29 08:10:04.618377528 +0000 UTC m=+5696.289872085" Jan 29 08:10:05 crc kubenswrapper[4861]: I0129 08:10:05.599738 4861 generic.go:334] "Generic (PLEG): container finished" podID="b9c11f12-732d-470e-b8fa-29cbf4d977fb" 
containerID="441c2ee0f6d03a6d2320f4d04b8cccd02ea0e54d69bda0b008f651022bf5bf66" exitCode=0 Jan 29 08:10:05 crc kubenswrapper[4861]: I0129 08:10:05.599798 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-9082-account-create-update-82jff" event={"ID":"b9c11f12-732d-470e-b8fa-29cbf4d977fb","Type":"ContainerDied","Data":"441c2ee0f6d03a6d2320f4d04b8cccd02ea0e54d69bda0b008f651022bf5bf66"} Jan 29 08:10:05 crc kubenswrapper[4861]: I0129 08:10:05.603672 4861 generic.go:334] "Generic (PLEG): container finished" podID="36d0e0cc-132a-48ad-83cc-15c5ffda48e1" containerID="c52e6c2544058ee6c149c5574f3bd6f297f8a376a4a929bddd0eb95ffe91fd29" exitCode=0 Jan 29 08:10:05 crc kubenswrapper[4861]: I0129 08:10:05.603749 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zt52c" event={"ID":"36d0e0cc-132a-48ad-83cc-15c5ffda48e1","Type":"ContainerDied","Data":"c52e6c2544058ee6c149c5574f3bd6f297f8a376a4a929bddd0eb95ffe91fd29"} Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.016281 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zt52c" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.022051 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-9082-account-create-update-82jff" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.144552 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-operator-scripts\") pod \"36d0e0cc-132a-48ad-83cc-15c5ffda48e1\" (UID: \"36d0e0cc-132a-48ad-83cc-15c5ffda48e1\") " Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.144685 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnmwc\" (UniqueName: \"kubernetes.io/projected/b9c11f12-732d-470e-b8fa-29cbf4d977fb-kube-api-access-rnmwc\") pod \"b9c11f12-732d-470e-b8fa-29cbf4d977fb\" (UID: \"b9c11f12-732d-470e-b8fa-29cbf4d977fb\") " Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.144923 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24h4l\" (UniqueName: \"kubernetes.io/projected/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-kube-api-access-24h4l\") pod \"36d0e0cc-132a-48ad-83cc-15c5ffda48e1\" (UID: \"36d0e0cc-132a-48ad-83cc-15c5ffda48e1\") " Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.144977 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9c11f12-732d-470e-b8fa-29cbf4d977fb-operator-scripts\") pod \"b9c11f12-732d-470e-b8fa-29cbf4d977fb\" (UID: \"b9c11f12-732d-470e-b8fa-29cbf4d977fb\") " Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.145642 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "36d0e0cc-132a-48ad-83cc-15c5ffda48e1" (UID: "36d0e0cc-132a-48ad-83cc-15c5ffda48e1"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.146104 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9c11f12-732d-470e-b8fa-29cbf4d977fb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b9c11f12-732d-470e-b8fa-29cbf4d977fb" (UID: "b9c11f12-732d-470e-b8fa-29cbf4d977fb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.164436 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9c11f12-732d-470e-b8fa-29cbf4d977fb-kube-api-access-rnmwc" (OuterVolumeSpecName: "kube-api-access-rnmwc") pod "b9c11f12-732d-470e-b8fa-29cbf4d977fb" (UID: "b9c11f12-732d-470e-b8fa-29cbf4d977fb"). InnerVolumeSpecName "kube-api-access-rnmwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.164703 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-kube-api-access-24h4l" (OuterVolumeSpecName: "kube-api-access-24h4l") pod "36d0e0cc-132a-48ad-83cc-15c5ffda48e1" (UID: "36d0e0cc-132a-48ad-83cc-15c5ffda48e1"). InnerVolumeSpecName "kube-api-access-24h4l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.247639 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24h4l\" (UniqueName: \"kubernetes.io/projected/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-kube-api-access-24h4l\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.247688 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9c11f12-732d-470e-b8fa-29cbf4d977fb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.247717 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36d0e0cc-132a-48ad-83cc-15c5ffda48e1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.247734 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnmwc\" (UniqueName: \"kubernetes.io/projected/b9c11f12-732d-470e-b8fa-29cbf4d977fb-kube-api-access-rnmwc\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.626738 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-9082-account-create-update-82jff" event={"ID":"b9c11f12-732d-470e-b8fa-29cbf4d977fb","Type":"ContainerDied","Data":"472b8d631c7f88f476207b98a12993b177375b77d90cec4c0ff2909dc64aacc8"} Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.627109 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="472b8d631c7f88f476207b98a12993b177375b77d90cec4c0ff2909dc64aacc8" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.626793 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-9082-account-create-update-82jff" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.629022 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zt52c" event={"ID":"36d0e0cc-132a-48ad-83cc-15c5ffda48e1","Type":"ContainerDied","Data":"e6eae762062c61f7916a7333b634dafbaa27b6794c29d30e7c972becb2f5ba9e"} Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.629161 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6eae762062c61f7916a7333b634dafbaa27b6794c29d30e7c972becb2f5ba9e" Jan 29 08:10:07 crc kubenswrapper[4861]: I0129 08:10:07.629118 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zt52c" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.834492 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-rnh8s"] Jan 29 08:10:08 crc kubenswrapper[4861]: E0129 08:10:08.835157 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36d0e0cc-132a-48ad-83cc-15c5ffda48e1" containerName="mariadb-database-create" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.835580 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="36d0e0cc-132a-48ad-83cc-15c5ffda48e1" containerName="mariadb-database-create" Jan 29 08:10:08 crc kubenswrapper[4861]: E0129 08:10:08.835612 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9c11f12-732d-470e-b8fa-29cbf4d977fb" containerName="mariadb-account-create-update" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.835625 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9c11f12-732d-470e-b8fa-29cbf4d977fb" containerName="mariadb-account-create-update" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.835929 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9c11f12-732d-470e-b8fa-29cbf4d977fb" containerName="mariadb-account-create-update" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.835982 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="36d0e0cc-132a-48ad-83cc-15c5ffda48e1" containerName="mariadb-database-create" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.837002 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.840298 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.841621 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cg74x" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.845951 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-rnh8s"] Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.882956 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-db-sync-config-data\") pod \"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.883021 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-combined-ca-bundle\") pod \"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.883206 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-config-data\") pod \"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.883424 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ljkc\" (UniqueName: \"kubernetes.io/projected/215d3063-4d55-431e-a474-612c0ab49a24-kube-api-access-6ljkc\") pod \"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.986139 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-db-sync-config-data\") pod \"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.986246 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-combined-ca-bundle\") pod \"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.986312 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-config-data\") pod \"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.986420 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ljkc\" (UniqueName: \"kubernetes.io/projected/215d3063-4d55-431e-a474-612c0ab49a24-kube-api-access-6ljkc\") pod 
\"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.993944 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-combined-ca-bundle\") pod \"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:08 crc kubenswrapper[4861]: I0129 08:10:08.995553 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-db-sync-config-data\") pod \"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:09 crc kubenswrapper[4861]: I0129 08:10:09.002302 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-config-data\") pod \"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:09 crc kubenswrapper[4861]: I0129 08:10:09.007214 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ljkc\" (UniqueName: \"kubernetes.io/projected/215d3063-4d55-431e-a474-612c0ab49a24-kube-api-access-6ljkc\") pod \"glance-db-sync-rnh8s\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:09 crc kubenswrapper[4861]: I0129 08:10:09.187410 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cg74x" Jan 29 08:10:09 crc kubenswrapper[4861]: I0129 08:10:09.195140 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:09 crc kubenswrapper[4861]: I0129 08:10:09.547252 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-rnh8s"] Jan 29 08:10:09 crc kubenswrapper[4861]: I0129 08:10:09.648888 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-rnh8s" event={"ID":"215d3063-4d55-431e-a474-612c0ab49a24","Type":"ContainerStarted","Data":"5608bc365ead218990f1f26be1a6a47b94e8a878755097ff1dbe6e054fc0c42a"} Jan 29 08:10:10 crc kubenswrapper[4861]: I0129 08:10:10.660407 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-rnh8s" event={"ID":"215d3063-4d55-431e-a474-612c0ab49a24","Type":"ContainerStarted","Data":"24e63608206df2b802c8a67ec96560fd65d464a572c46d304fa7aa32556811d1"} Jan 29 08:10:10 crc kubenswrapper[4861]: I0129 08:10:10.679539 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-rnh8s" podStartSLOduration=2.679512314 podStartE2EDuration="2.679512314s" podCreationTimestamp="2026-01-29 08:10:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:10:10.672799738 +0000 UTC m=+5702.344294315" watchObservedRunningTime="2026-01-29 08:10:10.679512314 +0000 UTC m=+5702.351006891" Jan 29 08:10:13 crc kubenswrapper[4861]: I0129 08:10:13.693504 4861 generic.go:334] "Generic (PLEG): container finished" podID="215d3063-4d55-431e-a474-612c0ab49a24" containerID="24e63608206df2b802c8a67ec96560fd65d464a572c46d304fa7aa32556811d1" exitCode=0 Jan 29 08:10:13 crc kubenswrapper[4861]: I0129 08:10:13.693555 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-rnh8s" event={"ID":"215d3063-4d55-431e-a474-612c0ab49a24","Type":"ContainerDied","Data":"24e63608206df2b802c8a67ec96560fd65d464a572c46d304fa7aa32556811d1"} Jan 29 08:10:14 crc kubenswrapper[4861]: I0129 08:10:14.116356 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:10:14 crc kubenswrapper[4861]: E0129 08:10:14.116659 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.175961 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.212896 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ljkc\" (UniqueName: \"kubernetes.io/projected/215d3063-4d55-431e-a474-612c0ab49a24-kube-api-access-6ljkc\") pod \"215d3063-4d55-431e-a474-612c0ab49a24\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.213130 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-db-sync-config-data\") pod \"215d3063-4d55-431e-a474-612c0ab49a24\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.213205 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-config-data\") pod \"215d3063-4d55-431e-a474-612c0ab49a24\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.213258 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-combined-ca-bundle\") pod \"215d3063-4d55-431e-a474-612c0ab49a24\" (UID: \"215d3063-4d55-431e-a474-612c0ab49a24\") " Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.218231 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "215d3063-4d55-431e-a474-612c0ab49a24" (UID: "215d3063-4d55-431e-a474-612c0ab49a24"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.218393 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/215d3063-4d55-431e-a474-612c0ab49a24-kube-api-access-6ljkc" (OuterVolumeSpecName: "kube-api-access-6ljkc") pod "215d3063-4d55-431e-a474-612c0ab49a24" (UID: "215d3063-4d55-431e-a474-612c0ab49a24"). InnerVolumeSpecName "kube-api-access-6ljkc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.262940 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "215d3063-4d55-431e-a474-612c0ab49a24" (UID: "215d3063-4d55-431e-a474-612c0ab49a24"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.295651 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-config-data" (OuterVolumeSpecName: "config-data") pod "215d3063-4d55-431e-a474-612c0ab49a24" (UID: "215d3063-4d55-431e-a474-612c0ab49a24"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.315246 4861 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.315287 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.315298 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215d3063-4d55-431e-a474-612c0ab49a24-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.315310 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ljkc\" (UniqueName: \"kubernetes.io/projected/215d3063-4d55-431e-a474-612c0ab49a24-kube-api-access-6ljkc\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.719959 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-rnh8s" event={"ID":"215d3063-4d55-431e-a474-612c0ab49a24","Type":"ContainerDied","Data":"5608bc365ead218990f1f26be1a6a47b94e8a878755097ff1dbe6e054fc0c42a"} Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.720007 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5608bc365ead218990f1f26be1a6a47b94e8a878755097ff1dbe6e054fc0c42a" Jan 29 08:10:15 crc kubenswrapper[4861]: I0129 08:10:15.720032 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-rnh8s" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.038698 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:10:16 crc kubenswrapper[4861]: E0129 08:10:16.039135 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="215d3063-4d55-431e-a474-612c0ab49a24" containerName="glance-db-sync" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.039153 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="215d3063-4d55-431e-a474-612c0ab49a24" containerName="glance-db-sync" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.039306 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="215d3063-4d55-431e-a474-612c0ab49a24" containerName="glance-db-sync" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.040247 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.043968 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.044616 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cg74x" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.047290 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.070207 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.122114 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c5dc4b95-jz4r4"] Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.123501 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.131181 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-logs\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.131223 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.131278 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-config-data\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.131319 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vdwl\" (UniqueName: \"kubernetes.io/projected/6d34d38e-bd2b-4232-97af-707617c2fd54-kube-api-access-8vdwl\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.131435 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.131453 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-scripts\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc 
kubenswrapper[4861]: I0129 08:10:16.131543 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c5dc4b95-jz4r4"] Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.232985 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.233034 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-scripts\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.233094 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-logs\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.233123 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.233162 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zz6fq\" (UniqueName: \"kubernetes.io/projected/151bed27-ae47-4828-b809-03b07d23c8c3-kube-api-access-zz6fq\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.233197 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-sb\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.233234 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-config-data\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.233281 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vdwl\" (UniqueName: \"kubernetes.io/projected/6d34d38e-bd2b-4232-97af-707617c2fd54-kube-api-access-8vdwl\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.233300 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-nb\") 
pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.233343 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-config\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.233382 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-dns-svc\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.234134 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.235429 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-logs\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.238393 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.238798 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-config-data\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.242737 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-scripts\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.260393 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vdwl\" (UniqueName: \"kubernetes.io/projected/6d34d38e-bd2b-4232-97af-707617c2fd54-kube-api-access-8vdwl\") pod \"glance-default-external-api-0\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.284352 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.285633 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.287723 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.308964 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.334383 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zz6fq\" (UniqueName: \"kubernetes.io/projected/151bed27-ae47-4828-b809-03b07d23c8c3-kube-api-access-zz6fq\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.334443 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-sb\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.334468 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.334516 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-nb\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.334545 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.334563 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mzpx\" (UniqueName: \"kubernetes.io/projected/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-kube-api-access-7mzpx\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.334578 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.334597 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-config\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: 
\"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.334615 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.334649 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-dns-svc\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.334664 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-logs\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.335526 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-nb\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.335666 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-dns-svc\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.335901 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-config\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.336063 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-sb\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.352780 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zz6fq\" (UniqueName: \"kubernetes.io/projected/151bed27-ae47-4828-b809-03b07d23c8c3-kube-api-access-zz6fq\") pod \"dnsmasq-dns-7c5dc4b95-jz4r4\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") " pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.363627 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.436501 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.436587 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.436609 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mzpx\" (UniqueName: \"kubernetes.io/projected/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-kube-api-access-7mzpx\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.436627 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.436649 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.436680 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-logs\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.437254 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-logs\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.438140 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.443222 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.443473 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.444115 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.447823 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.454820 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mzpx\" (UniqueName: \"kubernetes.io/projected/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-kube-api-access-7mzpx\") pod \"glance-default-internal-api-0\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.652268 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.923370 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:10:16 crc kubenswrapper[4861]: I0129 08:10:16.965105 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c5dc4b95-jz4r4"] Jan 29 08:10:16 crc kubenswrapper[4861]: W0129 08:10:16.967650 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod151bed27_ae47_4828_b809_03b07d23c8c3.slice/crio-b3992b46c944b5b3c994a82ab758665cde3fc193e6e5af78fa4de56439603d0c WatchSource:0}: Error finding container b3992b46c944b5b3c994a82ab758665cde3fc193e6e5af78fa4de56439603d0c: Status 404 returned error can't find the container with id b3992b46c944b5b3c994a82ab758665cde3fc193e6e5af78fa4de56439603d0c Jan 29 08:10:17 crc kubenswrapper[4861]: I0129 08:10:17.093060 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:17.263229 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:17.750552 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6d34d38e-bd2b-4232-97af-707617c2fd54","Type":"ContainerStarted","Data":"9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93"} Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:17.750968 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6d34d38e-bd2b-4232-97af-707617c2fd54","Type":"ContainerStarted","Data":"c7ed49c1d3f1e241f0c74e71591c8a03075946237f243a73786830f6baccf3c4"} Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:17.751998 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"0f962d07-d47f-4cb9-9b97-40b8a53fca5c","Type":"ContainerStarted","Data":"c9a122ff3812273974b1181514511aa37a39cab47bf0f263ae756eaaf7c2d4fe"} Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:17.753991 4861 generic.go:334] "Generic (PLEG): container finished" podID="151bed27-ae47-4828-b809-03b07d23c8c3" containerID="a4f313968924846098814f754c0ab6153ff33cd1ec8025204bec4be5e9b715c3" exitCode=0 Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:17.754021 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" event={"ID":"151bed27-ae47-4828-b809-03b07d23c8c3","Type":"ContainerDied","Data":"a4f313968924846098814f754c0ab6153ff33cd1ec8025204bec4be5e9b715c3"} Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:17.754037 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" event={"ID":"151bed27-ae47-4828-b809-03b07d23c8c3","Type":"ContainerStarted","Data":"b3992b46c944b5b3c994a82ab758665cde3fc193e6e5af78fa4de56439603d0c"} Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.338247 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.773986 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6d34d38e-bd2b-4232-97af-707617c2fd54","Type":"ContainerStarted","Data":"b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462"} Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.774170 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6d34d38e-bd2b-4232-97af-707617c2fd54" containerName="glance-log" containerID="cri-o://9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93" gracePeriod=30 Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.774637 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6d34d38e-bd2b-4232-97af-707617c2fd54" containerName="glance-httpd" containerID="cri-o://b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462" gracePeriod=30 Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.782007 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0f962d07-d47f-4cb9-9b97-40b8a53fca5c" containerName="glance-log" containerID="cri-o://3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972" gracePeriod=30 Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.782154 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0f962d07-d47f-4cb9-9b97-40b8a53fca5c","Type":"ContainerStarted","Data":"dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9"} Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.782196 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0f962d07-d47f-4cb9-9b97-40b8a53fca5c","Type":"ContainerStarted","Data":"3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972"} Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.782252 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0f962d07-d47f-4cb9-9b97-40b8a53fca5c" containerName="glance-httpd" 
containerID="cri-o://dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9" gracePeriod=30 Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.784993 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" event={"ID":"151bed27-ae47-4828-b809-03b07d23c8c3","Type":"ContainerStarted","Data":"87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209"} Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.785199 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.807430 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=2.807412933 podStartE2EDuration="2.807412933s" podCreationTimestamp="2026-01-29 08:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:10:18.80003685 +0000 UTC m=+5710.471531417" watchObservedRunningTime="2026-01-29 08:10:18.807412933 +0000 UTC m=+5710.478907490" Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.829117 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" podStartSLOduration=2.829097221 podStartE2EDuration="2.829097221s" podCreationTimestamp="2026-01-29 08:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:10:18.821023469 +0000 UTC m=+5710.492518026" watchObservedRunningTime="2026-01-29 08:10:18.829097221 +0000 UTC m=+5710.500591778" Jan 29 08:10:18 crc kubenswrapper[4861]: I0129 08:10:18.848584 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=2.84856059 podStartE2EDuration="2.84856059s" podCreationTimestamp="2026-01-29 08:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:10:18.841941427 +0000 UTC m=+5710.513435994" watchObservedRunningTime="2026-01-29 08:10:18.84856059 +0000 UTC m=+5710.520055147" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.325695 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.408896 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-scripts\") pod \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.408956 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-logs\") pod \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.409051 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-combined-ca-bundle\") pod \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.409098 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-httpd-run\") pod \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.409135 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-config-data\") pod \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.409215 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mzpx\" (UniqueName: \"kubernetes.io/projected/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-kube-api-access-7mzpx\") pod \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\" (UID: \"0f962d07-d47f-4cb9-9b97-40b8a53fca5c\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.409623 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-logs" (OuterVolumeSpecName: "logs") pod "0f962d07-d47f-4cb9-9b97-40b8a53fca5c" (UID: "0f962d07-d47f-4cb9-9b97-40b8a53fca5c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.409626 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0f962d07-d47f-4cb9-9b97-40b8a53fca5c" (UID: "0f962d07-d47f-4cb9-9b97-40b8a53fca5c"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.410274 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.410303 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.414697 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-scripts" (OuterVolumeSpecName: "scripts") pod "0f962d07-d47f-4cb9-9b97-40b8a53fca5c" (UID: "0f962d07-d47f-4cb9-9b97-40b8a53fca5c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.414724 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-kube-api-access-7mzpx" (OuterVolumeSpecName: "kube-api-access-7mzpx") pod "0f962d07-d47f-4cb9-9b97-40b8a53fca5c" (UID: "0f962d07-d47f-4cb9-9b97-40b8a53fca5c"). InnerVolumeSpecName "kube-api-access-7mzpx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.433571 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0f962d07-d47f-4cb9-9b97-40b8a53fca5c" (UID: "0f962d07-d47f-4cb9-9b97-40b8a53fca5c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.463661 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.468052 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-config-data" (OuterVolumeSpecName: "config-data") pod "0f962d07-d47f-4cb9-9b97-40b8a53fca5c" (UID: "0f962d07-d47f-4cb9-9b97-40b8a53fca5c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.511315 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vdwl\" (UniqueName: \"kubernetes.io/projected/6d34d38e-bd2b-4232-97af-707617c2fd54-kube-api-access-8vdwl\") pod \"6d34d38e-bd2b-4232-97af-707617c2fd54\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.511371 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-scripts\") pod \"6d34d38e-bd2b-4232-97af-707617c2fd54\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.511426 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-logs\") pod \"6d34d38e-bd2b-4232-97af-707617c2fd54\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.511594 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-httpd-run\") pod \"6d34d38e-bd2b-4232-97af-707617c2fd54\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.511654 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-config-data\") pod \"6d34d38e-bd2b-4232-97af-707617c2fd54\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.511741 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-combined-ca-bundle\") pod \"6d34d38e-bd2b-4232-97af-707617c2fd54\" (UID: \"6d34d38e-bd2b-4232-97af-707617c2fd54\") " Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.511941 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-logs" (OuterVolumeSpecName: "logs") pod "6d34d38e-bd2b-4232-97af-707617c2fd54" (UID: "6d34d38e-bd2b-4232-97af-707617c2fd54"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.511975 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6d34d38e-bd2b-4232-97af-707617c2fd54" (UID: "6d34d38e-bd2b-4232-97af-707617c2fd54"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.512511 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.512526 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.512538 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.512548 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mzpx\" (UniqueName: \"kubernetes.io/projected/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-kube-api-access-7mzpx\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.512559 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d34d38e-bd2b-4232-97af-707617c2fd54-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.512568 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f962d07-d47f-4cb9-9b97-40b8a53fca5c-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.544966 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-scripts" (OuterVolumeSpecName: "scripts") pod "6d34d38e-bd2b-4232-97af-707617c2fd54" (UID: "6d34d38e-bd2b-4232-97af-707617c2fd54"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.547257 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d34d38e-bd2b-4232-97af-707617c2fd54-kube-api-access-8vdwl" (OuterVolumeSpecName: "kube-api-access-8vdwl") pod "6d34d38e-bd2b-4232-97af-707617c2fd54" (UID: "6d34d38e-bd2b-4232-97af-707617c2fd54"). InnerVolumeSpecName "kube-api-access-8vdwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.582777 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d34d38e-bd2b-4232-97af-707617c2fd54" (UID: "6d34d38e-bd2b-4232-97af-707617c2fd54"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.590231 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-config-data" (OuterVolumeSpecName: "config-data") pod "6d34d38e-bd2b-4232-97af-707617c2fd54" (UID: "6d34d38e-bd2b-4232-97af-707617c2fd54"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.614741 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.614975 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vdwl\" (UniqueName: \"kubernetes.io/projected/6d34d38e-bd2b-4232-97af-707617c2fd54-kube-api-access-8vdwl\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.614990 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.615001 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d34d38e-bd2b-4232-97af-707617c2fd54-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.794777 4861 generic.go:334] "Generic (PLEG): container finished" podID="6d34d38e-bd2b-4232-97af-707617c2fd54" containerID="b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462" exitCode=0 Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.794809 4861 generic.go:334] "Generic (PLEG): container finished" podID="6d34d38e-bd2b-4232-97af-707617c2fd54" containerID="9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93" exitCode=143 Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.794848 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6d34d38e-bd2b-4232-97af-707617c2fd54","Type":"ContainerDied","Data":"b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462"} Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.794877 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6d34d38e-bd2b-4232-97af-707617c2fd54","Type":"ContainerDied","Data":"9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93"} Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.794891 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6d34d38e-bd2b-4232-97af-707617c2fd54","Type":"ContainerDied","Data":"c7ed49c1d3f1e241f0c74e71591c8a03075946237f243a73786830f6baccf3c4"} Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.794909 4861 scope.go:117] "RemoveContainer" containerID="b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.795015 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.802435 4861 generic.go:334] "Generic (PLEG): container finished" podID="0f962d07-d47f-4cb9-9b97-40b8a53fca5c" containerID="dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9" exitCode=143 Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.802590 4861 generic.go:334] "Generic (PLEG): container finished" podID="0f962d07-d47f-4cb9-9b97-40b8a53fca5c" containerID="3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972" exitCode=143 Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.802552 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0f962d07-d47f-4cb9-9b97-40b8a53fca5c","Type":"ContainerDied","Data":"dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9"} Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.802719 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0f962d07-d47f-4cb9-9b97-40b8a53fca5c","Type":"ContainerDied","Data":"3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972"} Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.802734 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0f962d07-d47f-4cb9-9b97-40b8a53fca5c","Type":"ContainerDied","Data":"c9a122ff3812273974b1181514511aa37a39cab47bf0f263ae756eaaf7c2d4fe"} Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.802531 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.874324 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.893491 4861 scope.go:117] "RemoveContainer" containerID="9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.896276 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.903242 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:10:19 crc kubenswrapper[4861]: E0129 08:10:19.903650 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f962d07-d47f-4cb9-9b97-40b8a53fca5c" containerName="glance-httpd" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.903664 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f962d07-d47f-4cb9-9b97-40b8a53fca5c" containerName="glance-httpd" Jan 29 08:10:19 crc kubenswrapper[4861]: E0129 08:10:19.903685 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d34d38e-bd2b-4232-97af-707617c2fd54" containerName="glance-log" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.903694 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d34d38e-bd2b-4232-97af-707617c2fd54" containerName="glance-log" Jan 29 08:10:19 crc kubenswrapper[4861]: E0129 08:10:19.903704 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d34d38e-bd2b-4232-97af-707617c2fd54" containerName="glance-httpd" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.903714 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d34d38e-bd2b-4232-97af-707617c2fd54" 
containerName="glance-httpd" Jan 29 08:10:19 crc kubenswrapper[4861]: E0129 08:10:19.903744 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f962d07-d47f-4cb9-9b97-40b8a53fca5c" containerName="glance-log" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.903754 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f962d07-d47f-4cb9-9b97-40b8a53fca5c" containerName="glance-log" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.903963 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f962d07-d47f-4cb9-9b97-40b8a53fca5c" containerName="glance-httpd" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.903986 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f962d07-d47f-4cb9-9b97-40b8a53fca5c" containerName="glance-log" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.903998 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d34d38e-bd2b-4232-97af-707617c2fd54" containerName="glance-log" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.904019 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d34d38e-bd2b-4232-97af-707617c2fd54" containerName="glance-httpd" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.910356 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.919538 4861 scope.go:117] "RemoveContainer" containerID="b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462" Jan 29 08:10:19 crc kubenswrapper[4861]: E0129 08:10:19.919947 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462\": container with ID starting with b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462 not found: ID does not exist" containerID="b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.919972 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462"} err="failed to get container status \"b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462\": rpc error: code = NotFound desc = could not find container \"b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462\": container with ID starting with b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462 not found: ID does not exist" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.919991 4861 scope.go:117] "RemoveContainer" containerID="9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.920109 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 29 08:10:19 crc kubenswrapper[4861]: E0129 08:10:19.920199 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93\": container with ID starting with 9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93 not found: ID does not exist" containerID="9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.920219 4861 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93"} err="failed to get container status \"9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93\": rpc error: code = NotFound desc = could not find container \"9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93\": container with ID starting with 9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93 not found: ID does not exist" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.920232 4861 scope.go:117] "RemoveContainer" containerID="b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.920429 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.920679 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.921119 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462"} err="failed to get container status \"b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462\": rpc error: code = NotFound desc = could not find container \"b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462\": container with ID starting with b44f98f0138009f2821e0d44d40e62eda6696b226bd5784501b22331888da462 not found: ID does not exist" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.921227 4861 scope.go:117] "RemoveContainer" containerID="9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.921487 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93"} err="failed to get container status \"9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93\": rpc error: code = NotFound desc = could not find container \"9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93\": container with ID starting with 9257dc2c18e90eaa7e97e1bbc1a52e79724413d5b4233c7b75517d0cdefe0a93 not found: ID does not exist" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.921540 4861 scope.go:117] "RemoveContainer" containerID="dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.921763 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.934490 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cg74x" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.965001 4861 scope.go:117] "RemoveContainer" containerID="3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.966620 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:10:19 crc kubenswrapper[4861]: E0129 08:10:19.973308 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f962d07_d47f_4cb9_9b97_40b8a53fca5c.slice\": RecentStats: unable to 
find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d34d38e_bd2b_4232_97af_707617c2fd54.slice/crio-c7ed49c1d3f1e241f0c74e71591c8a03075946237f243a73786830f6baccf3c4\": RecentStats: unable to find data in memory cache]" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.987393 4861 scope.go:117] "RemoveContainer" containerID="dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.987490 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:10:19 crc kubenswrapper[4861]: E0129 08:10:19.987880 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9\": container with ID starting with dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9 not found: ID does not exist" containerID="dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.987907 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9"} err="failed to get container status \"dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9\": rpc error: code = NotFound desc = could not find container \"dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9\": container with ID starting with dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9 not found: ID does not exist" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.987925 4861 scope.go:117] "RemoveContainer" containerID="3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972" Jan 29 08:10:19 crc kubenswrapper[4861]: E0129 08:10:19.988097 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972\": container with ID starting with 3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972 not found: ID does not exist" containerID="3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.988117 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972"} err="failed to get container status \"3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972\": rpc error: code = NotFound desc = could not find container \"3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972\": container with ID starting with 3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972 not found: ID does not exist" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.988128 4861 scope.go:117] "RemoveContainer" containerID="dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.988282 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9"} err="failed to get container status \"dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9\": rpc error: code = NotFound desc = could not find container \"dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9\": 
container with ID starting with dd16d53e430d387c0b4efa839233ad5645275b2bb85ea64384f8f76232fcfdf9 not found: ID does not exist" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.988303 4861 scope.go:117] "RemoveContainer" containerID="3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.988504 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972"} err="failed to get container status \"3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972\": rpc error: code = NotFound desc = could not find container \"3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972\": container with ID starting with 3bdf86fd60bce45fb0ad7a6cd459106596ba5036397166e9a2f1b7cef1cd3972 not found: ID does not exist" Jan 29 08:10:19 crc kubenswrapper[4861]: I0129 08:10:19.999225 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.001004 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.003478 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.004106 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.011310 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.058742 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-logs\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.058804 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.058849 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9mfg\" (UniqueName: \"kubernetes.io/projected/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-kube-api-access-k9mfg\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.058949 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.058981 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.059021 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.059087 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.160388 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-logs\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.160894 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.160830 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-logs\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.160972 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9mfg\" (UniqueName: \"kubernetes.io/projected/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-kube-api-access-k9mfg\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161251 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161308 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161376 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-logs\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161406 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-config-data\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161452 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161527 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161552 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161577 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161602 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-scripts\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161627 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161658 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5l5kp\" (UniqueName: \"kubernetes.io/projected/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-kube-api-access-5l5kp\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.161685 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.166208 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.166421 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.168116 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.169971 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.187402 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9mfg\" (UniqueName: \"kubernetes.io/projected/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-kube-api-access-k9mfg\") pod \"glance-default-internal-api-0\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.262188 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.263223 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.263419 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-scripts\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.265101 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5l5kp\" (UniqueName: \"kubernetes.io/projected/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-kube-api-access-5l5kp\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.263829 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.265381 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.265581 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-logs\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.265635 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-config-data\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.265682 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.266892 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-logs\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 
08:10:20.267857 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-scripts\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.271591 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.271947 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-config-data\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.272014 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.286920 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5l5kp\" (UniqueName: \"kubernetes.io/projected/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-kube-api-access-5l5kp\") pod \"glance-default-external-api-0\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.329371 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 08:10:20 crc kubenswrapper[4861]: I0129 08:10:20.870454 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:10:21 crc kubenswrapper[4861]: I0129 08:10:21.004397 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:10:21 crc kubenswrapper[4861]: I0129 08:10:21.129805 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f962d07-d47f-4cb9-9b97-40b8a53fca5c" path="/var/lib/kubelet/pods/0f962d07-d47f-4cb9-9b97-40b8a53fca5c/volumes" Jan 29 08:10:21 crc kubenswrapper[4861]: I0129 08:10:21.131746 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d34d38e-bd2b-4232-97af-707617c2fd54" path="/var/lib/kubelet/pods/6d34d38e-bd2b-4232-97af-707617c2fd54/volumes" Jan 29 08:10:21 crc kubenswrapper[4861]: I0129 08:10:21.821241 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d","Type":"ContainerStarted","Data":"1c665bc05a78b77d6a80786dbce31909183c32f4ca989b40fd63de4386b8a22b"} Jan 29 08:10:21 crc kubenswrapper[4861]: I0129 08:10:21.821563 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d","Type":"ContainerStarted","Data":"805d989ce5d2ddba87d41b9aef1fcfcc17a5ee8ca402f4b130b572f2149aae4e"} Jan 29 08:10:21 crc kubenswrapper[4861]: I0129 08:10:21.827856 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1","Type":"ContainerStarted","Data":"27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d"} Jan 29 08:10:21 crc kubenswrapper[4861]: I0129 08:10:21.827894 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1","Type":"ContainerStarted","Data":"4f7ba354214f91c713beebba3ed0e9f025fd698f6fc600c70bb0d9ad11c933d7"} Jan 29 08:10:22 crc kubenswrapper[4861]: I0129 08:10:22.857235 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1","Type":"ContainerStarted","Data":"fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f"} Jan 29 08:10:22 crc kubenswrapper[4861]: I0129 08:10:22.862596 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d","Type":"ContainerStarted","Data":"5ae1c90c85b006c4ff066b1990e099f99eeec0dde3dd97e124951475df5bd1f6"} Jan 29 08:10:22 crc kubenswrapper[4861]: I0129 08:10:22.889274 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.889239775 podStartE2EDuration="3.889239775s" podCreationTimestamp="2026-01-29 08:10:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:10:22.882675593 +0000 UTC m=+5714.554170170" watchObservedRunningTime="2026-01-29 08:10:22.889239775 +0000 UTC m=+5714.560734372" Jan 29 08:10:22 crc kubenswrapper[4861]: I0129 08:10:22.916317 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" 
podStartSLOduration=3.916289003 podStartE2EDuration="3.916289003s" podCreationTimestamp="2026-01-29 08:10:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:10:22.910019089 +0000 UTC m=+5714.581513716" watchObservedRunningTime="2026-01-29 08:10:22.916289003 +0000 UTC m=+5714.587783570" Jan 29 08:10:26 crc kubenswrapper[4861]: I0129 08:10:26.251630 4861 scope.go:117] "RemoveContainer" containerID="e09b0918194f963720a53191a04b37942582b7019c3522a516b6ab0c78cc184c" Jan 29 08:10:26 crc kubenswrapper[4861]: I0129 08:10:26.450372 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" Jan 29 08:10:26 crc kubenswrapper[4861]: I0129 08:10:26.515097 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b5f4c7889-pb9x6"] Jan 29 08:10:26 crc kubenswrapper[4861]: I0129 08:10:26.515388 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" podUID="c4b37722-dbe9-4212-9c67-a28c9adc613c" containerName="dnsmasq-dns" containerID="cri-o://1c7b7e1d08b7053e14c44b368a4bbf8e4b66c58864a6b18bca43de788f35670e" gracePeriod=10 Jan 29 08:10:26 crc kubenswrapper[4861]: I0129 08:10:26.917391 4861 generic.go:334] "Generic (PLEG): container finished" podID="c4b37722-dbe9-4212-9c67-a28c9adc613c" containerID="1c7b7e1d08b7053e14c44b368a4bbf8e4b66c58864a6b18bca43de788f35670e" exitCode=0 Jan 29 08:10:26 crc kubenswrapper[4861]: I0129 08:10:26.917426 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" event={"ID":"c4b37722-dbe9-4212-9c67-a28c9adc613c","Type":"ContainerDied","Data":"1c7b7e1d08b7053e14c44b368a4bbf8e4b66c58864a6b18bca43de788f35670e"} Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.008649 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.111744 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj9l4\" (UniqueName: \"kubernetes.io/projected/c4b37722-dbe9-4212-9c67-a28c9adc613c-kube-api-access-tj9l4\") pod \"c4b37722-dbe9-4212-9c67-a28c9adc613c\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.111818 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-sb\") pod \"c4b37722-dbe9-4212-9c67-a28c9adc613c\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.111874 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-nb\") pod \"c4b37722-dbe9-4212-9c67-a28c9adc613c\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.111907 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-config\") pod \"c4b37722-dbe9-4212-9c67-a28c9adc613c\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.111966 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-dns-svc\") pod \"c4b37722-dbe9-4212-9c67-a28c9adc613c\" (UID: \"c4b37722-dbe9-4212-9c67-a28c9adc613c\") " Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.118581 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4b37722-dbe9-4212-9c67-a28c9adc613c-kube-api-access-tj9l4" (OuterVolumeSpecName: "kube-api-access-tj9l4") pod "c4b37722-dbe9-4212-9c67-a28c9adc613c" (UID: "c4b37722-dbe9-4212-9c67-a28c9adc613c"). InnerVolumeSpecName "kube-api-access-tj9l4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.157687 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c4b37722-dbe9-4212-9c67-a28c9adc613c" (UID: "c4b37722-dbe9-4212-9c67-a28c9adc613c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.165213 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c4b37722-dbe9-4212-9c67-a28c9adc613c" (UID: "c4b37722-dbe9-4212-9c67-a28c9adc613c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.169547 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c4b37722-dbe9-4212-9c67-a28c9adc613c" (UID: "c4b37722-dbe9-4212-9c67-a28c9adc613c"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.175948 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-config" (OuterVolumeSpecName: "config") pod "c4b37722-dbe9-4212-9c67-a28c9adc613c" (UID: "c4b37722-dbe9-4212-9c67-a28c9adc613c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.214438 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.214470 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.214479 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.214489 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj9l4\" (UniqueName: \"kubernetes.io/projected/c4b37722-dbe9-4212-9c67-a28c9adc613c-kube-api-access-tj9l4\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.214499 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c4b37722-dbe9-4212-9c67-a28c9adc613c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.926767 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" event={"ID":"c4b37722-dbe9-4212-9c67-a28c9adc613c","Type":"ContainerDied","Data":"493cd5ce83dac3d0e51ea9545d3a7ddc35ac2e092adcca546b9b95fc94f11505"} Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.926824 4861 scope.go:117] "RemoveContainer" containerID="1c7b7e1d08b7053e14c44b368a4bbf8e4b66c58864a6b18bca43de788f35670e" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.926874 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b5f4c7889-pb9x6" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.959115 4861 scope.go:117] "RemoveContainer" containerID="c1baae1bae0e3bb6047e8af5223777de50c4aab048026e073b0799c1dd9a7a32" Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.964321 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b5f4c7889-pb9x6"] Jan 29 08:10:27 crc kubenswrapper[4861]: I0129 08:10:27.977024 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7b5f4c7889-pb9x6"] Jan 29 08:10:29 crc kubenswrapper[4861]: I0129 08:10:29.121831 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:10:29 crc kubenswrapper[4861]: E0129 08:10:29.122613 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:10:29 crc kubenswrapper[4861]: I0129 08:10:29.138033 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4b37722-dbe9-4212-9c67-a28c9adc613c" path="/var/lib/kubelet/pods/c4b37722-dbe9-4212-9c67-a28c9adc613c/volumes" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 08:10:30.262646 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 08:10:30.263039 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 08:10:30.309503 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 08:10:30.309880 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 08:10:30.330503 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 08:10:30.330566 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 08:10:30.359371 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 08:10:30.372297 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 08:10:30.963322 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 08:10:30.963370 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 08:10:30.963389 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:30 crc kubenswrapper[4861]: I0129 
08:10:30.963405 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 08:10:32 crc kubenswrapper[4861]: I0129 08:10:32.832191 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:32 crc kubenswrapper[4861]: I0129 08:10:32.850195 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 08:10:32 crc kubenswrapper[4861]: I0129 08:10:32.854757 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 08:10:32 crc kubenswrapper[4861]: I0129 08:10:32.873709 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.609751 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-h4h6l"] Jan 29 08:10:40 crc kubenswrapper[4861]: E0129 08:10:40.610745 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4b37722-dbe9-4212-9c67-a28c9adc613c" containerName="dnsmasq-dns" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.610765 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4b37722-dbe9-4212-9c67-a28c9adc613c" containerName="dnsmasq-dns" Jan 29 08:10:40 crc kubenswrapper[4861]: E0129 08:10:40.610786 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4b37722-dbe9-4212-9c67-a28c9adc613c" containerName="init" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.610794 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4b37722-dbe9-4212-9c67-a28c9adc613c" containerName="init" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.610999 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4b37722-dbe9-4212-9c67-a28c9adc613c" containerName="dnsmasq-dns" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.611728 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-h4h6l" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.625538 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-h4h6l"] Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.673791 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zncf\" (UniqueName: \"kubernetes.io/projected/6343814f-8268-4820-98ca-ed7d2d2f33d0-kube-api-access-6zncf\") pod \"placement-db-create-h4h6l\" (UID: \"6343814f-8268-4820-98ca-ed7d2d2f33d0\") " pod="openstack/placement-db-create-h4h6l" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.673881 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6343814f-8268-4820-98ca-ed7d2d2f33d0-operator-scripts\") pod \"placement-db-create-h4h6l\" (UID: \"6343814f-8268-4820-98ca-ed7d2d2f33d0\") " pod="openstack/placement-db-create-h4h6l" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.710000 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7bed-account-create-update-zj98v"] Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.711240 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7bed-account-create-update-zj98v" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.713322 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.729315 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7bed-account-create-update-zj98v"] Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.775626 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bff87\" (UniqueName: \"kubernetes.io/projected/cffe5493-2381-493d-85b9-07b863ae8d2c-kube-api-access-bff87\") pod \"placement-7bed-account-create-update-zj98v\" (UID: \"cffe5493-2381-493d-85b9-07b863ae8d2c\") " pod="openstack/placement-7bed-account-create-update-zj98v" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.775835 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cffe5493-2381-493d-85b9-07b863ae8d2c-operator-scripts\") pod \"placement-7bed-account-create-update-zj98v\" (UID: \"cffe5493-2381-493d-85b9-07b863ae8d2c\") " pod="openstack/placement-7bed-account-create-update-zj98v" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.775935 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zncf\" (UniqueName: \"kubernetes.io/projected/6343814f-8268-4820-98ca-ed7d2d2f33d0-kube-api-access-6zncf\") pod \"placement-db-create-h4h6l\" (UID: \"6343814f-8268-4820-98ca-ed7d2d2f33d0\") " pod="openstack/placement-db-create-h4h6l" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.776008 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6343814f-8268-4820-98ca-ed7d2d2f33d0-operator-scripts\") pod \"placement-db-create-h4h6l\" (UID: \"6343814f-8268-4820-98ca-ed7d2d2f33d0\") " pod="openstack/placement-db-create-h4h6l" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.776878 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6343814f-8268-4820-98ca-ed7d2d2f33d0-operator-scripts\") pod \"placement-db-create-h4h6l\" (UID: \"6343814f-8268-4820-98ca-ed7d2d2f33d0\") " pod="openstack/placement-db-create-h4h6l" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.806006 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zncf\" (UniqueName: \"kubernetes.io/projected/6343814f-8268-4820-98ca-ed7d2d2f33d0-kube-api-access-6zncf\") pod \"placement-db-create-h4h6l\" (UID: \"6343814f-8268-4820-98ca-ed7d2d2f33d0\") " pod="openstack/placement-db-create-h4h6l" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.878796 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bff87\" (UniqueName: \"kubernetes.io/projected/cffe5493-2381-493d-85b9-07b863ae8d2c-kube-api-access-bff87\") pod \"placement-7bed-account-create-update-zj98v\" (UID: \"cffe5493-2381-493d-85b9-07b863ae8d2c\") " pod="openstack/placement-7bed-account-create-update-zj98v" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.878985 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/cffe5493-2381-493d-85b9-07b863ae8d2c-operator-scripts\") pod \"placement-7bed-account-create-update-zj98v\" (UID: \"cffe5493-2381-493d-85b9-07b863ae8d2c\") " pod="openstack/placement-7bed-account-create-update-zj98v" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.879743 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cffe5493-2381-493d-85b9-07b863ae8d2c-operator-scripts\") pod \"placement-7bed-account-create-update-zj98v\" (UID: \"cffe5493-2381-493d-85b9-07b863ae8d2c\") " pod="openstack/placement-7bed-account-create-update-zj98v" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.903673 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bff87\" (UniqueName: \"kubernetes.io/projected/cffe5493-2381-493d-85b9-07b863ae8d2c-kube-api-access-bff87\") pod \"placement-7bed-account-create-update-zj98v\" (UID: \"cffe5493-2381-493d-85b9-07b863ae8d2c\") " pod="openstack/placement-7bed-account-create-update-zj98v" Jan 29 08:10:40 crc kubenswrapper[4861]: I0129 08:10:40.938722 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-h4h6l" Jan 29 08:10:41 crc kubenswrapper[4861]: I0129 08:10:41.027114 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7bed-account-create-update-zj98v" Jan 29 08:10:41 crc kubenswrapper[4861]: I0129 08:10:41.382431 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-h4h6l"] Jan 29 08:10:41 crc kubenswrapper[4861]: W0129 08:10:41.509143 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcffe5493_2381_493d_85b9_07b863ae8d2c.slice/crio-e7ad6ff5e515af3c9878a53d063b098da99a812f06deab82d7677a9488f066a5 WatchSource:0}: Error finding container e7ad6ff5e515af3c9878a53d063b098da99a812f06deab82d7677a9488f066a5: Status 404 returned error can't find the container with id e7ad6ff5e515af3c9878a53d063b098da99a812f06deab82d7677a9488f066a5 Jan 29 08:10:41 crc kubenswrapper[4861]: I0129 08:10:41.512979 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7bed-account-create-update-zj98v"] Jan 29 08:10:42 crc kubenswrapper[4861]: I0129 08:10:42.064894 4861 generic.go:334] "Generic (PLEG): container finished" podID="cffe5493-2381-493d-85b9-07b863ae8d2c" containerID="3c3bb80d979163a30693a6c0e6f92548937073636ecfe998a868fe7087e329a1" exitCode=0 Jan 29 08:10:42 crc kubenswrapper[4861]: I0129 08:10:42.064960 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7bed-account-create-update-zj98v" event={"ID":"cffe5493-2381-493d-85b9-07b863ae8d2c","Type":"ContainerDied","Data":"3c3bb80d979163a30693a6c0e6f92548937073636ecfe998a868fe7087e329a1"} Jan 29 08:10:42 crc kubenswrapper[4861]: I0129 08:10:42.064983 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7bed-account-create-update-zj98v" event={"ID":"cffe5493-2381-493d-85b9-07b863ae8d2c","Type":"ContainerStarted","Data":"e7ad6ff5e515af3c9878a53d063b098da99a812f06deab82d7677a9488f066a5"} Jan 29 08:10:42 crc kubenswrapper[4861]: I0129 08:10:42.066737 4861 generic.go:334] "Generic (PLEG): container finished" podID="6343814f-8268-4820-98ca-ed7d2d2f33d0" containerID="491f08d1639fc15e2efeb983d79c23bdd3fbb2e60e8eb7bb339f8b16464c4367" exitCode=0 Jan 29 08:10:42 crc kubenswrapper[4861]: I0129 
08:10:42.066765 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-h4h6l" event={"ID":"6343814f-8268-4820-98ca-ed7d2d2f33d0","Type":"ContainerDied","Data":"491f08d1639fc15e2efeb983d79c23bdd3fbb2e60e8eb7bb339f8b16464c4367"}
Jan 29 08:10:42 crc kubenswrapper[4861]: I0129 08:10:42.066779 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-h4h6l" event={"ID":"6343814f-8268-4820-98ca-ed7d2d2f33d0","Type":"ContainerStarted","Data":"298c79a79a89bd264561848b9b659c65f677d4ba3c7cbb416132971a0e45b563"}
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.117699 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa"
Jan 29 08:10:43 crc kubenswrapper[4861]: E0129 08:10:43.118517 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.485598 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-h4h6l"
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.537900 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zncf\" (UniqueName: \"kubernetes.io/projected/6343814f-8268-4820-98ca-ed7d2d2f33d0-kube-api-access-6zncf\") pod \"6343814f-8268-4820-98ca-ed7d2d2f33d0\" (UID: \"6343814f-8268-4820-98ca-ed7d2d2f33d0\") "
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.538322 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6343814f-8268-4820-98ca-ed7d2d2f33d0-operator-scripts\") pod \"6343814f-8268-4820-98ca-ed7d2d2f33d0\" (UID: \"6343814f-8268-4820-98ca-ed7d2d2f33d0\") "
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.539174 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6343814f-8268-4820-98ca-ed7d2d2f33d0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6343814f-8268-4820-98ca-ed7d2d2f33d0" (UID: "6343814f-8268-4820-98ca-ed7d2d2f33d0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.543710 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6343814f-8268-4820-98ca-ed7d2d2f33d0-kube-api-access-6zncf" (OuterVolumeSpecName: "kube-api-access-6zncf") pod "6343814f-8268-4820-98ca-ed7d2d2f33d0" (UID: "6343814f-8268-4820-98ca-ed7d2d2f33d0"). InnerVolumeSpecName "kube-api-access-6zncf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.590242 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7bed-account-create-update-zj98v"
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.640575 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bff87\" (UniqueName: \"kubernetes.io/projected/cffe5493-2381-493d-85b9-07b863ae8d2c-kube-api-access-bff87\") pod \"cffe5493-2381-493d-85b9-07b863ae8d2c\" (UID: \"cffe5493-2381-493d-85b9-07b863ae8d2c\") "
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.640785 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cffe5493-2381-493d-85b9-07b863ae8d2c-operator-scripts\") pod \"cffe5493-2381-493d-85b9-07b863ae8d2c\" (UID: \"cffe5493-2381-493d-85b9-07b863ae8d2c\") "
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.641436 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zncf\" (UniqueName: \"kubernetes.io/projected/6343814f-8268-4820-98ca-ed7d2d2f33d0-kube-api-access-6zncf\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.641468 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6343814f-8268-4820-98ca-ed7d2d2f33d0-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.641586 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cffe5493-2381-493d-85b9-07b863ae8d2c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cffe5493-2381-493d-85b9-07b863ae8d2c" (UID: "cffe5493-2381-493d-85b9-07b863ae8d2c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.643947 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cffe5493-2381-493d-85b9-07b863ae8d2c-kube-api-access-bff87" (OuterVolumeSpecName: "kube-api-access-bff87") pod "cffe5493-2381-493d-85b9-07b863ae8d2c" (UID: "cffe5493-2381-493d-85b9-07b863ae8d2c"). InnerVolumeSpecName "kube-api-access-bff87". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.743022 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cffe5493-2381-493d-85b9-07b863ae8d2c-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:43 crc kubenswrapper[4861]: I0129 08:10:43.743053 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bff87\" (UniqueName: \"kubernetes.io/projected/cffe5493-2381-493d-85b9-07b863ae8d2c-kube-api-access-bff87\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:44 crc kubenswrapper[4861]: I0129 08:10:44.088951 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-h4h6l"
Jan 29 08:10:44 crc kubenswrapper[4861]: I0129 08:10:44.089004 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-h4h6l" event={"ID":"6343814f-8268-4820-98ca-ed7d2d2f33d0","Type":"ContainerDied","Data":"298c79a79a89bd264561848b9b659c65f677d4ba3c7cbb416132971a0e45b563"}
Jan 29 08:10:44 crc kubenswrapper[4861]: I0129 08:10:44.089063 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="298c79a79a89bd264561848b9b659c65f677d4ba3c7cbb416132971a0e45b563"
Jan 29 08:10:44 crc kubenswrapper[4861]: I0129 08:10:44.091451 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7bed-account-create-update-zj98v"
Jan 29 08:10:44 crc kubenswrapper[4861]: I0129 08:10:44.091460 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7bed-account-create-update-zj98v" event={"ID":"cffe5493-2381-493d-85b9-07b863ae8d2c","Type":"ContainerDied","Data":"e7ad6ff5e515af3c9878a53d063b098da99a812f06deab82d7677a9488f066a5"}
Jan 29 08:10:44 crc kubenswrapper[4861]: I0129 08:10:44.091573 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7ad6ff5e515af3c9878a53d063b098da99a812f06deab82d7677a9488f066a5"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.054406 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-vgw88"]
Jan 29 08:10:46 crc kubenswrapper[4861]: E0129 08:10:46.055438 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6343814f-8268-4820-98ca-ed7d2d2f33d0" containerName="mariadb-database-create"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.055458 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6343814f-8268-4820-98ca-ed7d2d2f33d0" containerName="mariadb-database-create"
Jan 29 08:10:46 crc kubenswrapper[4861]: E0129 08:10:46.055496 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cffe5493-2381-493d-85b9-07b863ae8d2c" containerName="mariadb-account-create-update"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.055505 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="cffe5493-2381-493d-85b9-07b863ae8d2c" containerName="mariadb-account-create-update"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.055709 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="cffe5493-2381-493d-85b9-07b863ae8d2c" containerName="mariadb-account-create-update"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.055721 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="6343814f-8268-4820-98ca-ed7d2d2f33d0" containerName="mariadb-database-create"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.056495 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.058993 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.059440 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-thbc8"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.070983 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vgw88"]
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.071501 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.106454 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59d96fd4d9-s6dl7"]
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.107879 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.114593 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59d96fd4d9-s6dl7"]
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.193688 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-scripts\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.193911 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-sb\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.193985 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63a88d68-4cb1-4baa-b8a3-5568f520818e-logs\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.194050 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-nb\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.194162 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-config\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.194227 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-config-data\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.194290 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-dns-svc\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.194420 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-combined-ca-bundle\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.194536 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whtjp\" (UniqueName: \"kubernetes.io/projected/63a88d68-4cb1-4baa-b8a3-5568f520818e-kube-api-access-whtjp\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.194606 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nr9r\" (UniqueName: \"kubernetes.io/projected/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-kube-api-access-5nr9r\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.296201 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-combined-ca-bundle\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.296279 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whtjp\" (UniqueName: \"kubernetes.io/projected/63a88d68-4cb1-4baa-b8a3-5568f520818e-kube-api-access-whtjp\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.296298 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nr9r\" (UniqueName: \"kubernetes.io/projected/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-kube-api-access-5nr9r\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.296322 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-scripts\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.296346 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-sb\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.296362 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63a88d68-4cb1-4baa-b8a3-5568f520818e-logs\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.296381 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-nb\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.296401 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-config\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.296418 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-config-data\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.296433 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-dns-svc\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.297192 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-dns-svc\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.297444 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63a88d68-4cb1-4baa-b8a3-5568f520818e-logs\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.297627 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-sb\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.297926 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-nb\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.300354 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-config\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.301041 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-scripts\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.302697 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-config-data\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.302802 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-combined-ca-bundle\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.316243 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nr9r\" (UniqueName: \"kubernetes.io/projected/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-kube-api-access-5nr9r\") pod \"dnsmasq-dns-59d96fd4d9-s6dl7\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") " pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.325014 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whtjp\" (UniqueName: \"kubernetes.io/projected/63a88d68-4cb1-4baa-b8a3-5568f520818e-kube-api-access-whtjp\") pod \"placement-db-sync-vgw88\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") " pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.385310 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.426327 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.856146 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vgw88"]
Jan 29 08:10:46 crc kubenswrapper[4861]: W0129 08:10:46.865587 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63a88d68_4cb1_4baa_b8a3_5568f520818e.slice/crio-3df4242202d8a2547042b3a91e011fe6018bd6fdaed080d616418009caea68fe WatchSource:0}: Error finding container 3df4242202d8a2547042b3a91e011fe6018bd6fdaed080d616418009caea68fe: Status 404 returned error can't find the container with id 3df4242202d8a2547042b3a91e011fe6018bd6fdaed080d616418009caea68fe
Jan 29 08:10:46 crc kubenswrapper[4861]: I0129 08:10:46.924906 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59d96fd4d9-s6dl7"]
Jan 29 08:10:46 crc kubenswrapper[4861]: W0129 08:10:46.932851 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod347f68d9_759b_42c9_b01c_1bb2b5eccdd2.slice/crio-31aedd834e4d2854a232b29afb2ecdff0f317b18d024b2cd2991bdaac65c2726 WatchSource:0}: Error finding container 31aedd834e4d2854a232b29afb2ecdff0f317b18d024b2cd2991bdaac65c2726: Status 404 returned error can't find the container with id 31aedd834e4d2854a232b29afb2ecdff0f317b18d024b2cd2991bdaac65c2726
Jan 29 08:10:47 crc kubenswrapper[4861]: I0129 08:10:47.127233 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vgw88" event={"ID":"63a88d68-4cb1-4baa-b8a3-5568f520818e","Type":"ContainerStarted","Data":"1f1ce88188752c974ccb5cb5e01ee7b0fe56ae0bcf19b02a2b6fd59d9e68128a"}
Jan 29 08:10:47 crc kubenswrapper[4861]: I0129 08:10:47.127310 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vgw88" event={"ID":"63a88d68-4cb1-4baa-b8a3-5568f520818e","Type":"ContainerStarted","Data":"3df4242202d8a2547042b3a91e011fe6018bd6fdaed080d616418009caea68fe"}
Jan 29 08:10:47 crc kubenswrapper[4861]: I0129 08:10:47.127327 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7" event={"ID":"347f68d9-759b-42c9-b01c-1bb2b5eccdd2","Type":"ContainerStarted","Data":"31aedd834e4d2854a232b29afb2ecdff0f317b18d024b2cd2991bdaac65c2726"}
Jan 29 08:10:47 crc kubenswrapper[4861]: I0129 08:10:47.147620 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-vgw88" podStartSLOduration=1.147597216 podStartE2EDuration="1.147597216s" podCreationTimestamp="2026-01-29 08:10:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:10:47.144416013 +0000 UTC m=+5738.815910570" watchObservedRunningTime="2026-01-29 08:10:47.147597216 +0000 UTC m=+5738.819091773"
Jan 29 08:10:48 crc kubenswrapper[4861]: I0129 08:10:48.128810 4861 generic.go:334] "Generic (PLEG): container finished" podID="347f68d9-759b-42c9-b01c-1bb2b5eccdd2" containerID="2db902c897091d5709551040dd8dea66e64fb50cb5561a34af724893f05e7e1b" exitCode=0
Jan 29 08:10:48 crc kubenswrapper[4861]: I0129 08:10:48.128957 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7" event={"ID":"347f68d9-759b-42c9-b01c-1bb2b5eccdd2","Type":"ContainerDied","Data":"2db902c897091d5709551040dd8dea66e64fb50cb5561a34af724893f05e7e1b"}
Jan 29 08:10:49 crc kubenswrapper[4861]: I0129 08:10:49.140476 4861 generic.go:334] "Generic (PLEG): container finished" podID="63a88d68-4cb1-4baa-b8a3-5568f520818e" containerID="1f1ce88188752c974ccb5cb5e01ee7b0fe56ae0bcf19b02a2b6fd59d9e68128a" exitCode=0
Jan 29 08:10:49 crc kubenswrapper[4861]: I0129 08:10:49.140568 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vgw88" event={"ID":"63a88d68-4cb1-4baa-b8a3-5568f520818e","Type":"ContainerDied","Data":"1f1ce88188752c974ccb5cb5e01ee7b0fe56ae0bcf19b02a2b6fd59d9e68128a"}
Jan 29 08:10:49 crc kubenswrapper[4861]: I0129 08:10:49.144685 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7" event={"ID":"347f68d9-759b-42c9-b01c-1bb2b5eccdd2","Type":"ContainerStarted","Data":"4a9bd686ddf0622bd3fe9dc2a8179ef32233f197ea64acdb6017f62200484eb5"}
Jan 29 08:10:49 crc kubenswrapper[4861]: I0129 08:10:49.144944 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:49 crc kubenswrapper[4861]: I0129 08:10:49.197184 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7" podStartSLOduration=3.197155538 podStartE2EDuration="3.197155538s" podCreationTimestamp="2026-01-29 08:10:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:10:49.197002634 +0000 UTC m=+5740.868497221" watchObservedRunningTime="2026-01-29 08:10:49.197155538 +0000 UTC m=+5740.868650135"
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.614921 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.686239 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-config-data\") pod \"63a88d68-4cb1-4baa-b8a3-5568f520818e\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") "
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.686591 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whtjp\" (UniqueName: \"kubernetes.io/projected/63a88d68-4cb1-4baa-b8a3-5568f520818e-kube-api-access-whtjp\") pod \"63a88d68-4cb1-4baa-b8a3-5568f520818e\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") "
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.686752 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63a88d68-4cb1-4baa-b8a3-5568f520818e-logs\") pod \"63a88d68-4cb1-4baa-b8a3-5568f520818e\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") "
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.686827 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-scripts\") pod \"63a88d68-4cb1-4baa-b8a3-5568f520818e\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") "
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.686922 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-combined-ca-bundle\") pod \"63a88d68-4cb1-4baa-b8a3-5568f520818e\" (UID: \"63a88d68-4cb1-4baa-b8a3-5568f520818e\") "
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.689105 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63a88d68-4cb1-4baa-b8a3-5568f520818e-logs" (OuterVolumeSpecName: "logs") pod "63a88d68-4cb1-4baa-b8a3-5568f520818e" (UID: "63a88d68-4cb1-4baa-b8a3-5568f520818e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.690158 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63a88d68-4cb1-4baa-b8a3-5568f520818e-logs\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.692027 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-scripts" (OuterVolumeSpecName: "scripts") pod "63a88d68-4cb1-4baa-b8a3-5568f520818e" (UID: "63a88d68-4cb1-4baa-b8a3-5568f520818e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.697575 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63a88d68-4cb1-4baa-b8a3-5568f520818e-kube-api-access-whtjp" (OuterVolumeSpecName: "kube-api-access-whtjp") pod "63a88d68-4cb1-4baa-b8a3-5568f520818e" (UID: "63a88d68-4cb1-4baa-b8a3-5568f520818e"). InnerVolumeSpecName "kube-api-access-whtjp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.714542 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-config-data" (OuterVolumeSpecName: "config-data") pod "63a88d68-4cb1-4baa-b8a3-5568f520818e" (UID: "63a88d68-4cb1-4baa-b8a3-5568f520818e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.723518 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "63a88d68-4cb1-4baa-b8a3-5568f520818e" (UID: "63a88d68-4cb1-4baa-b8a3-5568f520818e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.792559 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.792735 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.792797 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63a88d68-4cb1-4baa-b8a3-5568f520818e-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:50 crc kubenswrapper[4861]: I0129 08:10:50.792880 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whtjp\" (UniqueName: \"kubernetes.io/projected/63a88d68-4cb1-4baa-b8a3-5568f520818e-kube-api-access-whtjp\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.169520 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vgw88" event={"ID":"63a88d68-4cb1-4baa-b8a3-5568f520818e","Type":"ContainerDied","Data":"3df4242202d8a2547042b3a91e011fe6018bd6fdaed080d616418009caea68fe"}
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.169914 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3df4242202d8a2547042b3a91e011fe6018bd6fdaed080d616418009caea68fe"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.169646 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vgw88"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.283189 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-59dd9c5cc4-d4jm7"]
Jan 29 08:10:51 crc kubenswrapper[4861]: E0129 08:10:51.283590 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63a88d68-4cb1-4baa-b8a3-5568f520818e" containerName="placement-db-sync"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.283611 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="63a88d68-4cb1-4baa-b8a3-5568f520818e" containerName="placement-db-sync"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.283805 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="63a88d68-4cb1-4baa-b8a3-5568f520818e" containerName="placement-db-sync"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.284804 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.288504 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.288805 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.289570 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-thbc8"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.291918 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.309330 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-public-tls-certs\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.309364 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.309379 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmqwz\" (UniqueName: \"kubernetes.io/projected/9ef50500-52f6-45bd-b3da-27d20b536b9e-kube-api-access-zmqwz\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.309420 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ef50500-52f6-45bd-b3da-27d20b536b9e-logs\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.309475 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-scripts\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.309627 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-combined-ca-bundle\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.309743 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-internal-tls-certs\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.309895 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-config-data\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.313087 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-59dd9c5cc4-d4jm7"]
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.411360 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-public-tls-certs\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.411412 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmqwz\" (UniqueName: \"kubernetes.io/projected/9ef50500-52f6-45bd-b3da-27d20b536b9e-kube-api-access-zmqwz\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.411446 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ef50500-52f6-45bd-b3da-27d20b536b9e-logs\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.411491 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-scripts\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.411573 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-combined-ca-bundle\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.411597 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-internal-tls-certs\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.411633 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-config-data\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.412368 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ef50500-52f6-45bd-b3da-27d20b536b9e-logs\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.416466 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-combined-ca-bundle\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.422969 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-config-data\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.432633 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-scripts\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.445751 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-internal-tls-certs\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.450856 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmqwz\" (UniqueName: \"kubernetes.io/projected/9ef50500-52f6-45bd-b3da-27d20b536b9e-kube-api-access-zmqwz\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.450906 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ef50500-52f6-45bd-b3da-27d20b536b9e-public-tls-certs\") pod \"placement-59dd9c5cc4-d4jm7\" (UID: \"9ef50500-52f6-45bd-b3da-27d20b536b9e\") " pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:51 crc kubenswrapper[4861]: I0129 08:10:51.614940 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:52 crc kubenswrapper[4861]: I0129 08:10:52.155595 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-59dd9c5cc4-d4jm7"]
Jan 29 08:10:52 crc kubenswrapper[4861]: W0129 08:10:52.165166 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ef50500_52f6_45bd_b3da_27d20b536b9e.slice/crio-e6226ff6109efbe514f5a968191b7e446fa477f83ea46240b18c9b1547441909 WatchSource:0}: Error finding container e6226ff6109efbe514f5a968191b7e446fa477f83ea46240b18c9b1547441909: Status 404 returned error can't find the container with id e6226ff6109efbe514f5a968191b7e446fa477f83ea46240b18c9b1547441909
Jan 29 08:10:52 crc kubenswrapper[4861]: I0129 08:10:52.178446 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-59dd9c5cc4-d4jm7" event={"ID":"9ef50500-52f6-45bd-b3da-27d20b536b9e","Type":"ContainerStarted","Data":"e6226ff6109efbe514f5a968191b7e446fa477f83ea46240b18c9b1547441909"}
Jan 29 08:10:53 crc kubenswrapper[4861]: I0129 08:10:53.187368 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-59dd9c5cc4-d4jm7" event={"ID":"9ef50500-52f6-45bd-b3da-27d20b536b9e","Type":"ContainerStarted","Data":"b7bdb1aa2ea3ec0c35d8423c4956a4b5f663d98ef0cdb4254431f559c439105d"}
Jan 29 08:10:53 crc kubenswrapper[4861]: I0129 08:10:53.187623 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-59dd9c5cc4-d4jm7" event={"ID":"9ef50500-52f6-45bd-b3da-27d20b536b9e","Type":"ContainerStarted","Data":"a714b519b19683e908dd535637ffd89ddabecdd5380a02671ce3ebadeb52aca5"}
Jan 29 08:10:53 crc kubenswrapper[4861]: I0129 08:10:53.187649 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:53 crc kubenswrapper[4861]: I0129 08:10:53.187661 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-59dd9c5cc4-d4jm7"
Jan 29 08:10:53 crc kubenswrapper[4861]: I0129 08:10:53.225430 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-59dd9c5cc4-d4jm7" podStartSLOduration=2.225411748 podStartE2EDuration="2.225411748s" podCreationTimestamp="2026-01-29 08:10:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:10:53.209694696 +0000 UTC m=+5744.881189263" watchObservedRunningTime="2026-01-29 08:10:53.225411748 +0000 UTC m=+5744.896906305"
Jan 29 08:10:54 crc kubenswrapper[4861]: I0129 08:10:54.116773 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa"
Jan 29 08:10:54 crc kubenswrapper[4861]: E0129 08:10:54.117118 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:10:56 crc kubenswrapper[4861]: I0129 08:10:56.428422 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:10:56 crc kubenswrapper[4861]: I0129 08:10:56.528425 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c5dc4b95-jz4r4"]
Jan 29 08:10:56 crc kubenswrapper[4861]: I0129 08:10:56.528940 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" podUID="151bed27-ae47-4828-b809-03b07d23c8c3" containerName="dnsmasq-dns" containerID="cri-o://87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209" gracePeriod=10
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.024823 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4"
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.136948 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-config\") pod \"151bed27-ae47-4828-b809-03b07d23c8c3\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") "
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.137294 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-dns-svc\") pod \"151bed27-ae47-4828-b809-03b07d23c8c3\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") "
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.137337 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-nb\") pod \"151bed27-ae47-4828-b809-03b07d23c8c3\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") "
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.137364 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-sb\") pod \"151bed27-ae47-4828-b809-03b07d23c8c3\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") "
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.137489 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zz6fq\" (UniqueName: \"kubernetes.io/projected/151bed27-ae47-4828-b809-03b07d23c8c3-kube-api-access-zz6fq\") pod \"151bed27-ae47-4828-b809-03b07d23c8c3\" (UID: \"151bed27-ae47-4828-b809-03b07d23c8c3\") "
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.159295 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/151bed27-ae47-4828-b809-03b07d23c8c3-kube-api-access-zz6fq" (OuterVolumeSpecName: "kube-api-access-zz6fq") pod "151bed27-ae47-4828-b809-03b07d23c8c3" (UID: "151bed27-ae47-4828-b809-03b07d23c8c3"). InnerVolumeSpecName "kube-api-access-zz6fq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.200572 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "151bed27-ae47-4828-b809-03b07d23c8c3" (UID: "151bed27-ae47-4828-b809-03b07d23c8c3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.202922 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "151bed27-ae47-4828-b809-03b07d23c8c3" (UID: "151bed27-ae47-4828-b809-03b07d23c8c3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.210542 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-config" (OuterVolumeSpecName: "config") pod "151bed27-ae47-4828-b809-03b07d23c8c3" (UID: "151bed27-ae47-4828-b809-03b07d23c8c3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.225961 4861 generic.go:334] "Generic (PLEG): container finished" podID="151bed27-ae47-4828-b809-03b07d23c8c3" containerID="87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209" exitCode=0
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.226004 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" event={"ID":"151bed27-ae47-4828-b809-03b07d23c8c3","Type":"ContainerDied","Data":"87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209"}
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.226029 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4" event={"ID":"151bed27-ae47-4828-b809-03b07d23c8c3","Type":"ContainerDied","Data":"b3992b46c944b5b3c994a82ab758665cde3fc193e6e5af78fa4de56439603d0c"}
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.226046 4861 scope.go:117] "RemoveContainer" containerID="87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209"
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.226186 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c5dc4b95-jz4r4"
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.226913 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "151bed27-ae47-4828-b809-03b07d23c8c3" (UID: "151bed27-ae47-4828-b809-03b07d23c8c3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.241653 4861 scope.go:117] "RemoveContainer" containerID="a4f313968924846098814f754c0ab6153ff33cd1ec8025204bec4be5e9b715c3"
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.242653 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.242751 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.242831 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.242887 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zz6fq\" (UniqueName: \"kubernetes.io/projected/151bed27-ae47-4828-b809-03b07d23c8c3-kube-api-access-zz6fq\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.242936 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151bed27-ae47-4828-b809-03b07d23c8c3-config\") on node \"crc\" DevicePath \"\""
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.258223 4861 scope.go:117] "RemoveContainer" containerID="87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209"
Jan 29 08:10:57 crc kubenswrapper[4861]: E0129 08:10:57.258645 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209\": container with ID starting with 87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209 not found: ID does not exist" containerID="87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209"
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.258675 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209"} err="failed to get container status \"87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209\": rpc error: code = NotFound desc = could not find container \"87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209\": container with ID starting with 87b044128fcd32f3c2ed67bcbcdc2b7d8e1a80326800dfea63f3ab87d05db209 not found: ID does not exist"
Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.258694 4861 scope.go:117] "RemoveContainer" containerID="a4f313968924846098814f754c0ab6153ff33cd1ec8025204bec4be5e9b715c3"
Jan 29 08:10:57 crc kubenswrapper[4861]: E0129 08:10:57.259028 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4f313968924846098814f754c0ab6153ff33cd1ec8025204bec4be5e9b715c3\": container with ID starting with a4f313968924846098814f754c0ab6153ff33cd1ec8025204bec4be5e9b715c3 not found: ID does not exist" containerID="a4f313968924846098814f754c0ab6153ff33cd1ec8025204bec4be5e9b715c3"
returned error" containerID={"Type":"cri-o","ID":"a4f313968924846098814f754c0ab6153ff33cd1ec8025204bec4be5e9b715c3"} err="failed to get container status \"a4f313968924846098814f754c0ab6153ff33cd1ec8025204bec4be5e9b715c3\": rpc error: code = NotFound desc = could not find container \"a4f313968924846098814f754c0ab6153ff33cd1ec8025204bec4be5e9b715c3\": container with ID starting with a4f313968924846098814f754c0ab6153ff33cd1ec8025204bec4be5e9b715c3 not found: ID does not exist" Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.559385 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c5dc4b95-jz4r4"] Jan 29 08:10:57 crc kubenswrapper[4861]: I0129 08:10:57.572807 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c5dc4b95-jz4r4"] Jan 29 08:10:59 crc kubenswrapper[4861]: I0129 08:10:59.150288 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="151bed27-ae47-4828-b809-03b07d23c8c3" path="/var/lib/kubelet/pods/151bed27-ae47-4828-b809-03b07d23c8c3/volumes" Jan 29 08:11:07 crc kubenswrapper[4861]: I0129 08:11:07.117414 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:11:07 crc kubenswrapper[4861]: E0129 08:11:07.118360 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:11:19 crc kubenswrapper[4861]: I0129 08:11:19.127343 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:11:19 crc kubenswrapper[4861]: E0129 08:11:19.128482 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:11:22 crc kubenswrapper[4861]: I0129 08:11:22.597779 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-59dd9c5cc4-d4jm7" Jan 29 08:11:22 crc kubenswrapper[4861]: I0129 08:11:22.599220 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-59dd9c5cc4-d4jm7" Jan 29 08:11:34 crc kubenswrapper[4861]: I0129 08:11:34.116583 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:11:34 crc kubenswrapper[4861]: E0129 08:11:34.118164 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.116422 4861 scope.go:117] "RemoveContainer" 
containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:11:46 crc kubenswrapper[4861]: E0129 08:11:46.117694 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.235566 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-wvqhr"] Jan 29 08:11:46 crc kubenswrapper[4861]: E0129 08:11:46.236609 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151bed27-ae47-4828-b809-03b07d23c8c3" containerName="init" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.236638 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="151bed27-ae47-4828-b809-03b07d23c8c3" containerName="init" Jan 29 08:11:46 crc kubenswrapper[4861]: E0129 08:11:46.236679 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151bed27-ae47-4828-b809-03b07d23c8c3" containerName="dnsmasq-dns" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.236695 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="151bed27-ae47-4828-b809-03b07d23c8c3" containerName="dnsmasq-dns" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.237106 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="151bed27-ae47-4828-b809-03b07d23c8c3" containerName="dnsmasq-dns" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.238881 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-wvqhr" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.302002 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-wvqhr"] Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.332608 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98e4a19c-e12f-44b8-9c7c-724acbc661de-operator-scripts\") pod \"nova-api-db-create-wvqhr\" (UID: \"98e4a19c-e12f-44b8-9c7c-724acbc661de\") " pod="openstack/nova-api-db-create-wvqhr" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.332665 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zn2kj\" (UniqueName: \"kubernetes.io/projected/98e4a19c-e12f-44b8-9c7c-724acbc661de-kube-api-access-zn2kj\") pod \"nova-api-db-create-wvqhr\" (UID: \"98e4a19c-e12f-44b8-9c7c-724acbc661de\") " pod="openstack/nova-api-db-create-wvqhr" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.349917 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-9ghkj"] Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.351026 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-9ghkj" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.365938 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-9ghkj"] Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.431129 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-7629-account-create-update-ts6d9"] Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.432307 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7629-account-create-update-ts6d9" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.433722 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zn2kj\" (UniqueName: \"kubernetes.io/projected/98e4a19c-e12f-44b8-9c7c-724acbc661de-kube-api-access-zn2kj\") pod \"nova-api-db-create-wvqhr\" (UID: \"98e4a19c-e12f-44b8-9c7c-724acbc661de\") " pod="openstack/nova-api-db-create-wvqhr" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.433779 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-operator-scripts\") pod \"nova-cell0-db-create-9ghkj\" (UID: \"0bf774fb-d1b4-48cf-bd1e-c92e921cad22\") " pod="openstack/nova-cell0-db-create-9ghkj" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.433844 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84fql\" (UniqueName: \"kubernetes.io/projected/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-kube-api-access-84fql\") pod \"nova-cell0-db-create-9ghkj\" (UID: \"0bf774fb-d1b4-48cf-bd1e-c92e921cad22\") " pod="openstack/nova-cell0-db-create-9ghkj" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.433957 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98e4a19c-e12f-44b8-9c7c-724acbc661de-operator-scripts\") pod \"nova-api-db-create-wvqhr\" (UID: \"98e4a19c-e12f-44b8-9c7c-724acbc661de\") " pod="openstack/nova-api-db-create-wvqhr" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.434636 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98e4a19c-e12f-44b8-9c7c-724acbc661de-operator-scripts\") pod \"nova-api-db-create-wvqhr\" (UID: \"98e4a19c-e12f-44b8-9c7c-724acbc661de\") " pod="openstack/nova-api-db-create-wvqhr" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.437118 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.442747 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7629-account-create-update-ts6d9"] Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.463879 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zn2kj\" (UniqueName: \"kubernetes.io/projected/98e4a19c-e12f-44b8-9c7c-724acbc661de-kube-api-access-zn2kj\") pod \"nova-api-db-create-wvqhr\" (UID: \"98e4a19c-e12f-44b8-9c7c-724acbc661de\") " pod="openstack/nova-api-db-create-wvqhr" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.525602 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-tgkdv"] Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.527123 4861 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tgkdv" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.535339 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2qdm\" (UniqueName: \"kubernetes.io/projected/f8565e37-6564-463f-b39b-9613e4e33d5d-kube-api-access-x2qdm\") pod \"nova-api-7629-account-create-update-ts6d9\" (UID: \"f8565e37-6564-463f-b39b-9613e4e33d5d\") " pod="openstack/nova-api-7629-account-create-update-ts6d9" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.535440 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-operator-scripts\") pod \"nova-cell0-db-create-9ghkj\" (UID: \"0bf774fb-d1b4-48cf-bd1e-c92e921cad22\") " pod="openstack/nova-cell0-db-create-9ghkj" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.535509 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84fql\" (UniqueName: \"kubernetes.io/projected/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-kube-api-access-84fql\") pod \"nova-cell0-db-create-9ghkj\" (UID: \"0bf774fb-d1b4-48cf-bd1e-c92e921cad22\") " pod="openstack/nova-cell0-db-create-9ghkj" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.535560 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8565e37-6564-463f-b39b-9613e4e33d5d-operator-scripts\") pod \"nova-api-7629-account-create-update-ts6d9\" (UID: \"f8565e37-6564-463f-b39b-9613e4e33d5d\") " pod="openstack/nova-api-7629-account-create-update-ts6d9" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.536159 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-operator-scripts\") pod \"nova-cell0-db-create-9ghkj\" (UID: \"0bf774fb-d1b4-48cf-bd1e-c92e921cad22\") " pod="openstack/nova-cell0-db-create-9ghkj" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.540375 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-tgkdv"] Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.559323 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84fql\" (UniqueName: \"kubernetes.io/projected/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-kube-api-access-84fql\") pod \"nova-cell0-db-create-9ghkj\" (UID: \"0bf774fb-d1b4-48cf-bd1e-c92e921cad22\") " pod="openstack/nova-cell0-db-create-9ghkj" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.634963 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-wvqhr" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.636612 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/40f7a1a2-bbca-4911-9337-5d2ce1129c21-operator-scripts\") pod \"nova-cell1-db-create-tgkdv\" (UID: \"40f7a1a2-bbca-4911-9337-5d2ce1129c21\") " pod="openstack/nova-cell1-db-create-tgkdv" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.636680 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8565e37-6564-463f-b39b-9613e4e33d5d-operator-scripts\") pod \"nova-api-7629-account-create-update-ts6d9\" (UID: \"f8565e37-6564-463f-b39b-9613e4e33d5d\") " pod="openstack/nova-api-7629-account-create-update-ts6d9" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.636729 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2qdm\" (UniqueName: \"kubernetes.io/projected/f8565e37-6564-463f-b39b-9613e4e33d5d-kube-api-access-x2qdm\") pod \"nova-api-7629-account-create-update-ts6d9\" (UID: \"f8565e37-6564-463f-b39b-9613e4e33d5d\") " pod="openstack/nova-api-7629-account-create-update-ts6d9" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.636811 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxhj5\" (UniqueName: \"kubernetes.io/projected/40f7a1a2-bbca-4911-9337-5d2ce1129c21-kube-api-access-zxhj5\") pod \"nova-cell1-db-create-tgkdv\" (UID: \"40f7a1a2-bbca-4911-9337-5d2ce1129c21\") " pod="openstack/nova-cell1-db-create-tgkdv" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.637750 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8565e37-6564-463f-b39b-9613e4e33d5d-operator-scripts\") pod \"nova-api-7629-account-create-update-ts6d9\" (UID: \"f8565e37-6564-463f-b39b-9613e4e33d5d\") " pod="openstack/nova-api-7629-account-create-update-ts6d9" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.644532 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-41c8-account-create-update-w5rm7"] Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.645744 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.652006 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.655981 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-41c8-account-create-update-w5rm7"] Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.658336 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2qdm\" (UniqueName: \"kubernetes.io/projected/f8565e37-6564-463f-b39b-9613e4e33d5d-kube-api-access-x2qdm\") pod \"nova-api-7629-account-create-update-ts6d9\" (UID: \"f8565e37-6564-463f-b39b-9613e4e33d5d\") " pod="openstack/nova-api-7629-account-create-update-ts6d9" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.673029 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-9ghkj" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.738612 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78f9471d-1f05-4771-9988-dd8882646a84-operator-scripts\") pod \"nova-cell0-41c8-account-create-update-w5rm7\" (UID: \"78f9471d-1f05-4771-9988-dd8882646a84\") " pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.738692 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxhj5\" (UniqueName: \"kubernetes.io/projected/40f7a1a2-bbca-4911-9337-5d2ce1129c21-kube-api-access-zxhj5\") pod \"nova-cell1-db-create-tgkdv\" (UID: \"40f7a1a2-bbca-4911-9337-5d2ce1129c21\") " pod="openstack/nova-cell1-db-create-tgkdv" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.738761 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/40f7a1a2-bbca-4911-9337-5d2ce1129c21-operator-scripts\") pod \"nova-cell1-db-create-tgkdv\" (UID: \"40f7a1a2-bbca-4911-9337-5d2ce1129c21\") " pod="openstack/nova-cell1-db-create-tgkdv" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.738796 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7675\" (UniqueName: \"kubernetes.io/projected/78f9471d-1f05-4771-9988-dd8882646a84-kube-api-access-w7675\") pod \"nova-cell0-41c8-account-create-update-w5rm7\" (UID: \"78f9471d-1f05-4771-9988-dd8882646a84\") " pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.739826 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/40f7a1a2-bbca-4911-9337-5d2ce1129c21-operator-scripts\") pod \"nova-cell1-db-create-tgkdv\" (UID: \"40f7a1a2-bbca-4911-9337-5d2ce1129c21\") " pod="openstack/nova-cell1-db-create-tgkdv" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.747994 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7629-account-create-update-ts6d9" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.765219 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxhj5\" (UniqueName: \"kubernetes.io/projected/40f7a1a2-bbca-4911-9337-5d2ce1129c21-kube-api-access-zxhj5\") pod \"nova-cell1-db-create-tgkdv\" (UID: \"40f7a1a2-bbca-4911-9337-5d2ce1129c21\") " pod="openstack/nova-cell1-db-create-tgkdv" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.841385 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7675\" (UniqueName: \"kubernetes.io/projected/78f9471d-1f05-4771-9988-dd8882646a84-kube-api-access-w7675\") pod \"nova-cell0-41c8-account-create-update-w5rm7\" (UID: \"78f9471d-1f05-4771-9988-dd8882646a84\") " pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.842553 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78f9471d-1f05-4771-9988-dd8882646a84-operator-scripts\") pod \"nova-cell0-41c8-account-create-update-w5rm7\" (UID: \"78f9471d-1f05-4771-9988-dd8882646a84\") " pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.843359 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78f9471d-1f05-4771-9988-dd8882646a84-operator-scripts\") pod \"nova-cell0-41c8-account-create-update-w5rm7\" (UID: \"78f9471d-1f05-4771-9988-dd8882646a84\") " pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.848663 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tgkdv" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.854451 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-62a6-account-create-update-xvkg5"] Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.855790 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.858281 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.861810 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7675\" (UniqueName: \"kubernetes.io/projected/78f9471d-1f05-4771-9988-dd8882646a84-kube-api-access-w7675\") pod \"nova-cell0-41c8-account-create-update-w5rm7\" (UID: \"78f9471d-1f05-4771-9988-dd8882646a84\") " pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.869001 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-62a6-account-create-update-xvkg5"] Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.944902 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f072fe6f-71a4-449d-8efe-17d5dad2cd43-operator-scripts\") pod \"nova-cell1-62a6-account-create-update-xvkg5\" (UID: \"f072fe6f-71a4-449d-8efe-17d5dad2cd43\") " pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" Jan 29 08:11:46 crc kubenswrapper[4861]: I0129 08:11:46.945187 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pms87\" (UniqueName: \"kubernetes.io/projected/f072fe6f-71a4-449d-8efe-17d5dad2cd43-kube-api-access-pms87\") pod \"nova-cell1-62a6-account-create-update-xvkg5\" (UID: \"f072fe6f-71a4-449d-8efe-17d5dad2cd43\") " pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.044529 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.047066 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pms87\" (UniqueName: \"kubernetes.io/projected/f072fe6f-71a4-449d-8efe-17d5dad2cd43-kube-api-access-pms87\") pod \"nova-cell1-62a6-account-create-update-xvkg5\" (UID: \"f072fe6f-71a4-449d-8efe-17d5dad2cd43\") " pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.047183 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f072fe6f-71a4-449d-8efe-17d5dad2cd43-operator-scripts\") pod \"nova-cell1-62a6-account-create-update-xvkg5\" (UID: \"f072fe6f-71a4-449d-8efe-17d5dad2cd43\") " pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.048105 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f072fe6f-71a4-449d-8efe-17d5dad2cd43-operator-scripts\") pod \"nova-cell1-62a6-account-create-update-xvkg5\" (UID: \"f072fe6f-71a4-449d-8efe-17d5dad2cd43\") " pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.066191 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pms87\" (UniqueName: \"kubernetes.io/projected/f072fe6f-71a4-449d-8efe-17d5dad2cd43-kube-api-access-pms87\") pod \"nova-cell1-62a6-account-create-update-xvkg5\" (UID: \"f072fe6f-71a4-449d-8efe-17d5dad2cd43\") " pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.146091 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-wvqhr"] Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.178734 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.198244 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-9ghkj"] Jan 29 08:11:47 crc kubenswrapper[4861]: W0129 08:11:47.215757 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bf774fb_d1b4_48cf_bd1e_c92e921cad22.slice/crio-549cf6584a453b68cb57bfaef91fbb3f3db920c708f4454558efa86d7ce90364 WatchSource:0}: Error finding container 549cf6584a453b68cb57bfaef91fbb3f3db920c708f4454558efa86d7ce90364: Status 404 returned error can't find the container with id 549cf6584a453b68cb57bfaef91fbb3f3db920c708f4454558efa86d7ce90364 Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.324536 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7629-account-create-update-ts6d9"] Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.387922 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-tgkdv"] Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.477053 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-41c8-account-create-update-w5rm7"] Jan 29 08:11:47 crc kubenswrapper[4861]: W0129 08:11:47.482210 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78f9471d_1f05_4771_9988_dd8882646a84.slice/crio-0701d3c247c41442e03323d7e33cd765ccb4c73511cd9701ca5dee6bf5c98e08 WatchSource:0}: Error finding container 0701d3c247c41442e03323d7e33cd765ccb4c73511cd9701ca5dee6bf5c98e08: Status 404 returned error can't find the container with id 0701d3c247c41442e03323d7e33cd765ccb4c73511cd9701ca5dee6bf5c98e08 Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.653427 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-62a6-account-create-update-xvkg5"] Jan 29 08:11:47 crc kubenswrapper[4861]: W0129 08:11:47.675311 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf072fe6f_71a4_449d_8efe_17d5dad2cd43.slice/crio-02e710302e72364bf2cad194d2c9565449b8bebe7f2dd722d62e0eee2c7eabd1 WatchSource:0}: Error finding container 02e710302e72364bf2cad194d2c9565449b8bebe7f2dd722d62e0eee2c7eabd1: Status 404 returned error can't find the container with id 02e710302e72364bf2cad194d2c9565449b8bebe7f2dd722d62e0eee2c7eabd1 Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.801271 4861 generic.go:334] "Generic (PLEG): container finished" podID="0bf774fb-d1b4-48cf-bd1e-c92e921cad22" containerID="333eeff7b98cb94e24451dea310c2f00abe649660fc1dec3488d8a4609c707fb" exitCode=0 Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.801321 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9ghkj" event={"ID":"0bf774fb-d1b4-48cf-bd1e-c92e921cad22","Type":"ContainerDied","Data":"333eeff7b98cb94e24451dea310c2f00abe649660fc1dec3488d8a4609c707fb"} Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.801380 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9ghkj" event={"ID":"0bf774fb-d1b4-48cf-bd1e-c92e921cad22","Type":"ContainerStarted","Data":"549cf6584a453b68cb57bfaef91fbb3f3db920c708f4454558efa86d7ce90364"} Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.805158 4861 generic.go:334] "Generic (PLEG): 
container finished" podID="98e4a19c-e12f-44b8-9c7c-724acbc661de" containerID="81ae4515af3ae7320f1b837589cc302de121bfbaa7e8e98d7230937f3815e406" exitCode=0 Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.805232 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-wvqhr" event={"ID":"98e4a19c-e12f-44b8-9c7c-724acbc661de","Type":"ContainerDied","Data":"81ae4515af3ae7320f1b837589cc302de121bfbaa7e8e98d7230937f3815e406"} Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.805252 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-wvqhr" event={"ID":"98e4a19c-e12f-44b8-9c7c-724acbc661de","Type":"ContainerStarted","Data":"24aca5db1084d5bbf60dce95daef289d522e84e77edff740ff7a6ef8489a9b9b"} Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.806894 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" event={"ID":"78f9471d-1f05-4771-9988-dd8882646a84","Type":"ContainerStarted","Data":"0701d3c247c41442e03323d7e33cd765ccb4c73511cd9701ca5dee6bf5c98e08"} Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.808751 4861 generic.go:334] "Generic (PLEG): container finished" podID="f8565e37-6564-463f-b39b-9613e4e33d5d" containerID="1fc84f198244b259b9881d2e1530e45d52061882eee70cbbbb97cc8c1cf2b0f1" exitCode=0 Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.808792 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7629-account-create-update-ts6d9" event={"ID":"f8565e37-6564-463f-b39b-9613e4e33d5d","Type":"ContainerDied","Data":"1fc84f198244b259b9881d2e1530e45d52061882eee70cbbbb97cc8c1cf2b0f1"} Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.808808 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7629-account-create-update-ts6d9" event={"ID":"f8565e37-6564-463f-b39b-9613e4e33d5d","Type":"ContainerStarted","Data":"e5e44de05c42b9697f5de70ba79a93273451f3e9e4ba7b7feada1ac12de46dbb"} Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.811411 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" event={"ID":"f072fe6f-71a4-449d-8efe-17d5dad2cd43","Type":"ContainerStarted","Data":"02e710302e72364bf2cad194d2c9565449b8bebe7f2dd722d62e0eee2c7eabd1"} Jan 29 08:11:47 crc kubenswrapper[4861]: I0129 08:11:47.812968 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tgkdv" event={"ID":"40f7a1a2-bbca-4911-9337-5d2ce1129c21","Type":"ContainerStarted","Data":"8148ba8b70a7fc8635f8871802b5766d2c51da1793afa884dac39f57592e5e63"} Jan 29 08:11:48 crc kubenswrapper[4861]: I0129 08:11:48.825810 4861 generic.go:334] "Generic (PLEG): container finished" podID="f072fe6f-71a4-449d-8efe-17d5dad2cd43" containerID="5042cc160120c75223f991c846e077bc26235ddabc9dcb0dc0703f905f43e812" exitCode=0 Jan 29 08:11:48 crc kubenswrapper[4861]: I0129 08:11:48.825906 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" event={"ID":"f072fe6f-71a4-449d-8efe-17d5dad2cd43","Type":"ContainerDied","Data":"5042cc160120c75223f991c846e077bc26235ddabc9dcb0dc0703f905f43e812"} Jan 29 08:11:48 crc kubenswrapper[4861]: I0129 08:11:48.829567 4861 generic.go:334] "Generic (PLEG): container finished" podID="40f7a1a2-bbca-4911-9337-5d2ce1129c21" containerID="9d1bd926bcc1bdeaf0acde340562efdec85cdf17ac5e4dc3ff65ce8c6d45483b" exitCode=0 Jan 29 08:11:48 crc kubenswrapper[4861]: I0129 
08:11:48.829631 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tgkdv" event={"ID":"40f7a1a2-bbca-4911-9337-5d2ce1129c21","Type":"ContainerDied","Data":"9d1bd926bcc1bdeaf0acde340562efdec85cdf17ac5e4dc3ff65ce8c6d45483b"} Jan 29 08:11:48 crc kubenswrapper[4861]: I0129 08:11:48.832635 4861 generic.go:334] "Generic (PLEG): container finished" podID="78f9471d-1f05-4771-9988-dd8882646a84" containerID="87f702658d1be2b04d8d96489bb5b91b28267a05491dd9d5d89f945726cb9bee" exitCode=0 Jan 29 08:11:48 crc kubenswrapper[4861]: I0129 08:11:48.832724 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" event={"ID":"78f9471d-1f05-4771-9988-dd8882646a84","Type":"ContainerDied","Data":"87f702658d1be2b04d8d96489bb5b91b28267a05491dd9d5d89f945726cb9bee"} Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.311526 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-wvqhr" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.325708 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-9ghkj" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.334283 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7629-account-create-update-ts6d9" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.412211 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98e4a19c-e12f-44b8-9c7c-724acbc661de-operator-scripts\") pod \"98e4a19c-e12f-44b8-9c7c-724acbc661de\" (UID: \"98e4a19c-e12f-44b8-9c7c-724acbc661de\") " Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.412631 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zn2kj\" (UniqueName: \"kubernetes.io/projected/98e4a19c-e12f-44b8-9c7c-724acbc661de-kube-api-access-zn2kj\") pod \"98e4a19c-e12f-44b8-9c7c-724acbc661de\" (UID: \"98e4a19c-e12f-44b8-9c7c-724acbc661de\") " Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.412842 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98e4a19c-e12f-44b8-9c7c-724acbc661de-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "98e4a19c-e12f-44b8-9c7c-724acbc661de" (UID: "98e4a19c-e12f-44b8-9c7c-724acbc661de"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.413355 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98e4a19c-e12f-44b8-9c7c-724acbc661de-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.419136 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98e4a19c-e12f-44b8-9c7c-724acbc661de-kube-api-access-zn2kj" (OuterVolumeSpecName: "kube-api-access-zn2kj") pod "98e4a19c-e12f-44b8-9c7c-724acbc661de" (UID: "98e4a19c-e12f-44b8-9c7c-724acbc661de"). InnerVolumeSpecName "kube-api-access-zn2kj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.514438 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-operator-scripts\") pod \"0bf774fb-d1b4-48cf-bd1e-c92e921cad22\" (UID: \"0bf774fb-d1b4-48cf-bd1e-c92e921cad22\") " Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.514550 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2qdm\" (UniqueName: \"kubernetes.io/projected/f8565e37-6564-463f-b39b-9613e4e33d5d-kube-api-access-x2qdm\") pod \"f8565e37-6564-463f-b39b-9613e4e33d5d\" (UID: \"f8565e37-6564-463f-b39b-9613e4e33d5d\") " Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.514703 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8565e37-6564-463f-b39b-9613e4e33d5d-operator-scripts\") pod \"f8565e37-6564-463f-b39b-9613e4e33d5d\" (UID: \"f8565e37-6564-463f-b39b-9613e4e33d5d\") " Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.514914 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84fql\" (UniqueName: \"kubernetes.io/projected/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-kube-api-access-84fql\") pod \"0bf774fb-d1b4-48cf-bd1e-c92e921cad22\" (UID: \"0bf774fb-d1b4-48cf-bd1e-c92e921cad22\") " Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.515151 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0bf774fb-d1b4-48cf-bd1e-c92e921cad22" (UID: "0bf774fb-d1b4-48cf-bd1e-c92e921cad22"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.515448 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8565e37-6564-463f-b39b-9613e4e33d5d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f8565e37-6564-463f-b39b-9613e4e33d5d" (UID: "f8565e37-6564-463f-b39b-9613e4e33d5d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.515757 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8565e37-6564-463f-b39b-9613e4e33d5d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.515784 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zn2kj\" (UniqueName: \"kubernetes.io/projected/98e4a19c-e12f-44b8-9c7c-724acbc661de-kube-api-access-zn2kj\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.515799 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.519732 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8565e37-6564-463f-b39b-9613e4e33d5d-kube-api-access-x2qdm" (OuterVolumeSpecName: "kube-api-access-x2qdm") pod "f8565e37-6564-463f-b39b-9613e4e33d5d" (UID: "f8565e37-6564-463f-b39b-9613e4e33d5d"). InnerVolumeSpecName "kube-api-access-x2qdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.520096 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-kube-api-access-84fql" (OuterVolumeSpecName: "kube-api-access-84fql") pod "0bf774fb-d1b4-48cf-bd1e-c92e921cad22" (UID: "0bf774fb-d1b4-48cf-bd1e-c92e921cad22"). InnerVolumeSpecName "kube-api-access-84fql". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.617124 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2qdm\" (UniqueName: \"kubernetes.io/projected/f8565e37-6564-463f-b39b-9613e4e33d5d-kube-api-access-x2qdm\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.617545 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84fql\" (UniqueName: \"kubernetes.io/projected/0bf774fb-d1b4-48cf-bd1e-c92e921cad22-kube-api-access-84fql\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.849935 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7629-account-create-update-ts6d9" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.849928 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7629-account-create-update-ts6d9" event={"ID":"f8565e37-6564-463f-b39b-9613e4e33d5d","Type":"ContainerDied","Data":"e5e44de05c42b9697f5de70ba79a93273451f3e9e4ba7b7feada1ac12de46dbb"} Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.851449 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5e44de05c42b9697f5de70ba79a93273451f3e9e4ba7b7feada1ac12de46dbb" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.856597 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-9ghkj" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.856592 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9ghkj" event={"ID":"0bf774fb-d1b4-48cf-bd1e-c92e921cad22","Type":"ContainerDied","Data":"549cf6584a453b68cb57bfaef91fbb3f3db920c708f4454558efa86d7ce90364"} Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.858234 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="549cf6584a453b68cb57bfaef91fbb3f3db920c708f4454558efa86d7ce90364" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.860024 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-wvqhr" Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.862435 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-wvqhr" event={"ID":"98e4a19c-e12f-44b8-9c7c-724acbc661de","Type":"ContainerDied","Data":"24aca5db1084d5bbf60dce95daef289d522e84e77edff740ff7a6ef8489a9b9b"} Jan 29 08:11:49 crc kubenswrapper[4861]: I0129 08:11:49.862505 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24aca5db1084d5bbf60dce95daef289d522e84e77edff740ff7a6ef8489a9b9b" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.254140 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.395339 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.400649 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tgkdv" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.434127 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7675\" (UniqueName: \"kubernetes.io/projected/78f9471d-1f05-4771-9988-dd8882646a84-kube-api-access-w7675\") pod \"78f9471d-1f05-4771-9988-dd8882646a84\" (UID: \"78f9471d-1f05-4771-9988-dd8882646a84\") " Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.434245 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78f9471d-1f05-4771-9988-dd8882646a84-operator-scripts\") pod \"78f9471d-1f05-4771-9988-dd8882646a84\" (UID: \"78f9471d-1f05-4771-9988-dd8882646a84\") " Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.435356 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78f9471d-1f05-4771-9988-dd8882646a84-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "78f9471d-1f05-4771-9988-dd8882646a84" (UID: "78f9471d-1f05-4771-9988-dd8882646a84"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.438213 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78f9471d-1f05-4771-9988-dd8882646a84-kube-api-access-w7675" (OuterVolumeSpecName: "kube-api-access-w7675") pod "78f9471d-1f05-4771-9988-dd8882646a84" (UID: "78f9471d-1f05-4771-9988-dd8882646a84"). InnerVolumeSpecName "kube-api-access-w7675". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.535890 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pms87\" (UniqueName: \"kubernetes.io/projected/f072fe6f-71a4-449d-8efe-17d5dad2cd43-kube-api-access-pms87\") pod \"f072fe6f-71a4-449d-8efe-17d5dad2cd43\" (UID: \"f072fe6f-71a4-449d-8efe-17d5dad2cd43\") " Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.536015 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/40f7a1a2-bbca-4911-9337-5d2ce1129c21-operator-scripts\") pod \"40f7a1a2-bbca-4911-9337-5d2ce1129c21\" (UID: \"40f7a1a2-bbca-4911-9337-5d2ce1129c21\") " Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.536066 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxhj5\" (UniqueName: \"kubernetes.io/projected/40f7a1a2-bbca-4911-9337-5d2ce1129c21-kube-api-access-zxhj5\") pod \"40f7a1a2-bbca-4911-9337-5d2ce1129c21\" (UID: \"40f7a1a2-bbca-4911-9337-5d2ce1129c21\") " Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.536122 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f072fe6f-71a4-449d-8efe-17d5dad2cd43-operator-scripts\") pod \"f072fe6f-71a4-449d-8efe-17d5dad2cd43\" (UID: \"f072fe6f-71a4-449d-8efe-17d5dad2cd43\") " Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.536693 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f072fe6f-71a4-449d-8efe-17d5dad2cd43-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f072fe6f-71a4-449d-8efe-17d5dad2cd43" (UID: "f072fe6f-71a4-449d-8efe-17d5dad2cd43"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.536801 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40f7a1a2-bbca-4911-9337-5d2ce1129c21-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "40f7a1a2-bbca-4911-9337-5d2ce1129c21" (UID: "40f7a1a2-bbca-4911-9337-5d2ce1129c21"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.537272 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78f9471d-1f05-4771-9988-dd8882646a84-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.537292 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/40f7a1a2-bbca-4911-9337-5d2ce1129c21-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.537305 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f072fe6f-71a4-449d-8efe-17d5dad2cd43-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.537328 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7675\" (UniqueName: \"kubernetes.io/projected/78f9471d-1f05-4771-9988-dd8882646a84-kube-api-access-w7675\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.538867 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40f7a1a2-bbca-4911-9337-5d2ce1129c21-kube-api-access-zxhj5" (OuterVolumeSpecName: "kube-api-access-zxhj5") pod "40f7a1a2-bbca-4911-9337-5d2ce1129c21" (UID: "40f7a1a2-bbca-4911-9337-5d2ce1129c21"). InnerVolumeSpecName "kube-api-access-zxhj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.540888 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f072fe6f-71a4-449d-8efe-17d5dad2cd43-kube-api-access-pms87" (OuterVolumeSpecName: "kube-api-access-pms87") pod "f072fe6f-71a4-449d-8efe-17d5dad2cd43" (UID: "f072fe6f-71a4-449d-8efe-17d5dad2cd43"). InnerVolumeSpecName "kube-api-access-pms87". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.638903 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pms87\" (UniqueName: \"kubernetes.io/projected/f072fe6f-71a4-449d-8efe-17d5dad2cd43-kube-api-access-pms87\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.638945 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxhj5\" (UniqueName: \"kubernetes.io/projected/40f7a1a2-bbca-4911-9337-5d2ce1129c21-kube-api-access-zxhj5\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.870446 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" event={"ID":"f072fe6f-71a4-449d-8efe-17d5dad2cd43","Type":"ContainerDied","Data":"02e710302e72364bf2cad194d2c9565449b8bebe7f2dd722d62e0eee2c7eabd1"} Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.870490 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02e710302e72364bf2cad194d2c9565449b8bebe7f2dd722d62e0eee2c7eabd1" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.870543 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-62a6-account-create-update-xvkg5" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.872565 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tgkdv" event={"ID":"40f7a1a2-bbca-4911-9337-5d2ce1129c21","Type":"ContainerDied","Data":"8148ba8b70a7fc8635f8871802b5766d2c51da1793afa884dac39f57592e5e63"} Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.872589 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8148ba8b70a7fc8635f8871802b5766d2c51da1793afa884dac39f57592e5e63" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.872594 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tgkdv" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.874232 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" event={"ID":"78f9471d-1f05-4771-9988-dd8882646a84","Type":"ContainerDied","Data":"0701d3c247c41442e03323d7e33cd765ccb4c73511cd9701ca5dee6bf5c98e08"} Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.874258 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0701d3c247c41442e03323d7e33cd765ccb4c73511cd9701ca5dee6bf5c98e08" Jan 29 08:11:50 crc kubenswrapper[4861]: I0129 08:11:50.874324 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-41c8-account-create-update-w5rm7" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.874533 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-kfwz9"] Jan 29 08:11:51 crc kubenswrapper[4861]: E0129 08:11:51.875304 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8565e37-6564-463f-b39b-9613e4e33d5d" containerName="mariadb-account-create-update" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875320 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8565e37-6564-463f-b39b-9613e4e33d5d" containerName="mariadb-account-create-update" Jan 29 08:11:51 crc kubenswrapper[4861]: E0129 08:11:51.875338 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40f7a1a2-bbca-4911-9337-5d2ce1129c21" containerName="mariadb-database-create" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875345 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="40f7a1a2-bbca-4911-9337-5d2ce1129c21" containerName="mariadb-database-create" Jan 29 08:11:51 crc kubenswrapper[4861]: E0129 08:11:51.875361 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f072fe6f-71a4-449d-8efe-17d5dad2cd43" containerName="mariadb-account-create-update" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875368 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f072fe6f-71a4-449d-8efe-17d5dad2cd43" containerName="mariadb-account-create-update" Jan 29 08:11:51 crc kubenswrapper[4861]: E0129 08:11:51.875387 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98e4a19c-e12f-44b8-9c7c-724acbc661de" containerName="mariadb-database-create" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875394 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="98e4a19c-e12f-44b8-9c7c-724acbc661de" containerName="mariadb-database-create" Jan 29 08:11:51 crc kubenswrapper[4861]: E0129 08:11:51.875417 4861 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="0bf774fb-d1b4-48cf-bd1e-c92e921cad22" containerName="mariadb-database-create" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875423 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bf774fb-d1b4-48cf-bd1e-c92e921cad22" containerName="mariadb-database-create" Jan 29 08:11:51 crc kubenswrapper[4861]: E0129 08:11:51.875439 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78f9471d-1f05-4771-9988-dd8882646a84" containerName="mariadb-account-create-update" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875458 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="78f9471d-1f05-4771-9988-dd8882646a84" containerName="mariadb-account-create-update" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875642 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="98e4a19c-e12f-44b8-9c7c-724acbc661de" containerName="mariadb-database-create" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875662 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8565e37-6564-463f-b39b-9613e4e33d5d" containerName="mariadb-account-create-update" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875676 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="40f7a1a2-bbca-4911-9337-5d2ce1129c21" containerName="mariadb-database-create" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875694 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="78f9471d-1f05-4771-9988-dd8882646a84" containerName="mariadb-account-create-update" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875709 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bf774fb-d1b4-48cf-bd1e-c92e921cad22" containerName="mariadb-database-create" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.875723 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f072fe6f-71a4-449d-8efe-17d5dad2cd43" containerName="mariadb-account-create-update" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.876517 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.880319 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.880327 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.880539 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-6wwdc" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.886646 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-kfwz9"] Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.962799 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mvfh\" (UniqueName: \"kubernetes.io/projected/cb01f98a-c389-43d5-b197-fabbed6a8288-kube-api-access-8mvfh\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.962872 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.962955 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-config-data\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:51 crc kubenswrapper[4861]: I0129 08:11:51.963033 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-scripts\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:52 crc kubenswrapper[4861]: I0129 08:11:52.064201 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-scripts\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:52 crc kubenswrapper[4861]: I0129 08:11:52.064254 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mvfh\" (UniqueName: \"kubernetes.io/projected/cb01f98a-c389-43d5-b197-fabbed6a8288-kube-api-access-8mvfh\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:52 crc kubenswrapper[4861]: I0129 08:11:52.064286 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: 
\"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:52 crc kubenswrapper[4861]: I0129 08:11:52.064343 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-config-data\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:52 crc kubenswrapper[4861]: I0129 08:11:52.069100 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:52 crc kubenswrapper[4861]: I0129 08:11:52.069737 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-config-data\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:52 crc kubenswrapper[4861]: I0129 08:11:52.077924 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-scripts\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:52 crc kubenswrapper[4861]: I0129 08:11:52.083204 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mvfh\" (UniqueName: \"kubernetes.io/projected/cb01f98a-c389-43d5-b197-fabbed6a8288-kube-api-access-8mvfh\") pod \"nova-cell0-conductor-db-sync-kfwz9\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:52 crc kubenswrapper[4861]: I0129 08:11:52.201188 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:52 crc kubenswrapper[4861]: I0129 08:11:52.684539 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-kfwz9"] Jan 29 08:11:52 crc kubenswrapper[4861]: W0129 08:11:52.685666 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb01f98a_c389_43d5_b197_fabbed6a8288.slice/crio-6d425cc5c5e826d9b2d07910183ecf30aa8ef2fa24ef52762c2f123759fa0ef1 WatchSource:0}: Error finding container 6d425cc5c5e826d9b2d07910183ecf30aa8ef2fa24ef52762c2f123759fa0ef1: Status 404 returned error can't find the container with id 6d425cc5c5e826d9b2d07910183ecf30aa8ef2fa24ef52762c2f123759fa0ef1 Jan 29 08:11:52 crc kubenswrapper[4861]: I0129 08:11:52.893982 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-kfwz9" event={"ID":"cb01f98a-c389-43d5-b197-fabbed6a8288","Type":"ContainerStarted","Data":"6d425cc5c5e826d9b2d07910183ecf30aa8ef2fa24ef52762c2f123759fa0ef1"} Jan 29 08:11:53 crc kubenswrapper[4861]: I0129 08:11:53.906716 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-kfwz9" event={"ID":"cb01f98a-c389-43d5-b197-fabbed6a8288","Type":"ContainerStarted","Data":"46f146531c9b0cbf14b74e6b0246566222a22b5bf963da1647bfb9ea8d893717"} Jan 29 08:11:53 crc kubenswrapper[4861]: I0129 08:11:53.945033 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-kfwz9" podStartSLOduration=2.945003749 podStartE2EDuration="2.945003749s" podCreationTimestamp="2026-01-29 08:11:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:11:53.928140615 +0000 UTC m=+5805.599635212" watchObservedRunningTime="2026-01-29 08:11:53.945003749 +0000 UTC m=+5805.616498346" Jan 29 08:11:57 crc kubenswrapper[4861]: I0129 08:11:57.953531 4861 generic.go:334] "Generic (PLEG): container finished" podID="cb01f98a-c389-43d5-b197-fabbed6a8288" containerID="46f146531c9b0cbf14b74e6b0246566222a22b5bf963da1647bfb9ea8d893717" exitCode=0 Jan 29 08:11:57 crc kubenswrapper[4861]: I0129 08:11:57.953649 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-kfwz9" event={"ID":"cb01f98a-c389-43d5-b197-fabbed6a8288","Type":"ContainerDied","Data":"46f146531c9b0cbf14b74e6b0246566222a22b5bf963da1647bfb9ea8d893717"} Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.126249 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:11:59 crc kubenswrapper[4861]: E0129 08:11:59.127136 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.396370 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.529671 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-config-data\") pod \"cb01f98a-c389-43d5-b197-fabbed6a8288\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.529852 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mvfh\" (UniqueName: \"kubernetes.io/projected/cb01f98a-c389-43d5-b197-fabbed6a8288-kube-api-access-8mvfh\") pod \"cb01f98a-c389-43d5-b197-fabbed6a8288\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.530176 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-scripts\") pod \"cb01f98a-c389-43d5-b197-fabbed6a8288\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.530228 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-combined-ca-bundle\") pod \"cb01f98a-c389-43d5-b197-fabbed6a8288\" (UID: \"cb01f98a-c389-43d5-b197-fabbed6a8288\") " Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.536783 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-scripts" (OuterVolumeSpecName: "scripts") pod "cb01f98a-c389-43d5-b197-fabbed6a8288" (UID: "cb01f98a-c389-43d5-b197-fabbed6a8288"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.544426 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb01f98a-c389-43d5-b197-fabbed6a8288-kube-api-access-8mvfh" (OuterVolumeSpecName: "kube-api-access-8mvfh") pod "cb01f98a-c389-43d5-b197-fabbed6a8288" (UID: "cb01f98a-c389-43d5-b197-fabbed6a8288"). InnerVolumeSpecName "kube-api-access-8mvfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.587660 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-config-data" (OuterVolumeSpecName: "config-data") pod "cb01f98a-c389-43d5-b197-fabbed6a8288" (UID: "cb01f98a-c389-43d5-b197-fabbed6a8288"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.592683 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb01f98a-c389-43d5-b197-fabbed6a8288" (UID: "cb01f98a-c389-43d5-b197-fabbed6a8288"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.633219 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.633263 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mvfh\" (UniqueName: \"kubernetes.io/projected/cb01f98a-c389-43d5-b197-fabbed6a8288-kube-api-access-8mvfh\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.633273 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:11:59 crc kubenswrapper[4861]: I0129 08:11:59.633281 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb01f98a-c389-43d5-b197-fabbed6a8288-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.029155 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-kfwz9" event={"ID":"cb01f98a-c389-43d5-b197-fabbed6a8288","Type":"ContainerDied","Data":"6d425cc5c5e826d9b2d07910183ecf30aa8ef2fa24ef52762c2f123759fa0ef1"} Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.029236 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d425cc5c5e826d9b2d07910183ecf30aa8ef2fa24ef52762c2f123759fa0ef1" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.029303 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-kfwz9" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.090844 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 08:12:00 crc kubenswrapper[4861]: E0129 08:12:00.091722 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb01f98a-c389-43d5-b197-fabbed6a8288" containerName="nova-cell0-conductor-db-sync" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.091774 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb01f98a-c389-43d5-b197-fabbed6a8288" containerName="nova-cell0-conductor-db-sync" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.092479 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb01f98a-c389-43d5-b197-fabbed6a8288" containerName="nova-cell0-conductor-db-sync" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.093860 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.096283 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.100716 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-6wwdc" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.103046 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.253855 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.256020 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsmzl\" (UniqueName: \"kubernetes.io/projected/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-kube-api-access-qsmzl\") pod \"nova-cell0-conductor-0\" (UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.256500 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.358152 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.358335 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsmzl\" (UniqueName: \"kubernetes.io/projected/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-kube-api-access-qsmzl\") pod \"nova-cell0-conductor-0\" (UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.358484 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.363986 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.365456 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.382433 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsmzl\" (UniqueName: \"kubernetes.io/projected/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-kube-api-access-qsmzl\") pod \"nova-cell0-conductor-0\" (UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.420644 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:00 crc kubenswrapper[4861]: I0129 08:12:00.938881 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 08:12:01 crc kubenswrapper[4861]: I0129 08:12:01.042515 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5","Type":"ContainerStarted","Data":"103ca54a8a41f4a71ab03b7a3166eda366770a40a98e8ef6c2e088ee0b7c2b1a"} Jan 29 08:12:02 crc kubenswrapper[4861]: I0129 08:12:02.056507 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5","Type":"ContainerStarted","Data":"7de32205f9a2e36a4c13ff50204796b23961557a0a39dfa34d29db1510f2860a"} Jan 29 08:12:02 crc kubenswrapper[4861]: I0129 08:12:02.056730 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:02 crc kubenswrapper[4861]: I0129 08:12:02.084655 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.084616211 podStartE2EDuration="2.084616211s" podCreationTimestamp="2026-01-29 08:12:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:02.077666118 +0000 UTC m=+5813.749160715" watchObservedRunningTime="2026-01-29 08:12:02.084616211 +0000 UTC m=+5813.756110778" Jan 29 08:12:10 crc kubenswrapper[4861]: I0129 08:12:10.467917 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.044476 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-dz5rn"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.045813 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.048669 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.049184 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.063819 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-dz5rn"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.205268 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zbmz\" (UniqueName: \"kubernetes.io/projected/5ee4d88f-ac71-43c0-819e-3813cde88dc1-kube-api-access-9zbmz\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.205332 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-config-data\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.205515 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.205581 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-scripts\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.251223 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.252596 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.256174 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.264431 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.310256 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.310651 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-scripts\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.310760 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zbmz\" (UniqueName: \"kubernetes.io/projected/5ee4d88f-ac71-43c0-819e-3813cde88dc1-kube-api-access-9zbmz\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.310797 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-config-data\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.319812 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-scripts\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.332851 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.337544 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-config-data\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.345419 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zbmz\" (UniqueName: \"kubernetes.io/projected/5ee4d88f-ac71-43c0-819e-3813cde88dc1-kube-api-access-9zbmz\") pod \"nova-cell0-cell-mapping-dz5rn\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") " pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.348554 4861 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.352208 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.356721 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.364882 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.371738 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.372856 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.382120 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dz5rn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.382961 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.417105 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-logs\") pod \"nova-api-0\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.417200 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-config-data\") pod \"nova-api-0\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.417231 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftkgl\" (UniqueName: \"kubernetes.io/projected/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-kube-api-access-ftkgl\") pod \"nova-api-0\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.417278 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.452014 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.493807 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.507278 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.507380 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.514010 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.531212 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") " pod="openstack/nova-scheduler-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.531264 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-config-data\") pod \"nova-api-0\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.531300 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69d77\" (UniqueName: \"kubernetes.io/projected/f15c9c0e-cc86-4134-909b-0f2618fc200a-kube-api-access-69d77\") pod \"nova-cell1-novncproxy-0\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.531324 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftkgl\" (UniqueName: \"kubernetes.io/projected/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-kube-api-access-ftkgl\") pod \"nova-api-0\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.531348 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-config-data\") pod \"nova-scheduler-0\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") " pod="openstack/nova-scheduler-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.531423 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.531446 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.531508 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.531582 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-logs\") pod \"nova-api-0\" (UID: 
\"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.531624 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2fhn\" (UniqueName: \"kubernetes.io/projected/65159619-fe95-4ca3-903b-45f7923ff326-kube-api-access-t2fhn\") pod \"nova-scheduler-0\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") " pod="openstack/nova-scheduler-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.534500 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-logs\") pod \"nova-api-0\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.560431 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.564649 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftkgl\" (UniqueName: \"kubernetes.io/projected/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-kube-api-access-ftkgl\") pod \"nova-api-0\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.568294 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-config-data\") pod \"nova-api-0\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") " pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.571634 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.619654 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54785db6bc-7bmkn"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.627809 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.640701 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54785db6bc-7bmkn"] Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.647849 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-config-data\") pod \"nova-scheduler-0\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") " pod="openstack/nova-scheduler-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.647900 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.647946 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-nb\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.647976 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-config-data\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.648007 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-dns-svc\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.648031 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.648131 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.648178 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqqp2\" (UniqueName: \"kubernetes.io/projected/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-kube-api-access-bqqp2\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.648223 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-logs\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.648263 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2fhn\" (UniqueName: \"kubernetes.io/projected/65159619-fe95-4ca3-903b-45f7923ff326-kube-api-access-t2fhn\") pod \"nova-scheduler-0\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") " pod="openstack/nova-scheduler-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.648310 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqvth\" (UniqueName: \"kubernetes.io/projected/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-kube-api-access-sqvth\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.648345 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-config\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.648404 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") " pod="openstack/nova-scheduler-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.648561 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-sb\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.648638 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69d77\" (UniqueName: \"kubernetes.io/projected/f15c9c0e-cc86-4134-909b-0f2618fc200a-kube-api-access-69d77\") pod \"nova-cell1-novncproxy-0\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.670352 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") " pod="openstack/nova-scheduler-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.676244 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-config-data\") pod \"nova-scheduler-0\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") " pod="openstack/nova-scheduler-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.677453 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") " 
pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.678051 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.712141 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2fhn\" (UniqueName: \"kubernetes.io/projected/65159619-fe95-4ca3-903b-45f7923ff326-kube-api-access-t2fhn\") pod \"nova-scheduler-0\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") " pod="openstack/nova-scheduler-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.712245 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69d77\" (UniqueName: \"kubernetes.io/projected/f15c9c0e-cc86-4134-909b-0f2618fc200a-kube-api-access-69d77\") pod \"nova-cell1-novncproxy-0\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.752286 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-config\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.752373 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-sb\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.752409 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.752437 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-nb\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.752455 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-config-data\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.752478 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-dns-svc\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.752539 4861 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-bqqp2\" (UniqueName: \"kubernetes.io/projected/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-kube-api-access-bqqp2\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.752579 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-logs\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.752626 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqvth\" (UniqueName: \"kubernetes.io/projected/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-kube-api-access-sqvth\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.753428 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-config\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.753768 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-logs\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.753439 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-sb\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.753900 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-nb\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.754118 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-dns-svc\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.760759 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.762842 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-config-data\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc 
kubenswrapper[4861]: I0129 08:12:11.775802 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqqp2\" (UniqueName: \"kubernetes.io/projected/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-kube-api-access-bqqp2\") pod \"dnsmasq-dns-54785db6bc-7bmkn\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.777179 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqvth\" (UniqueName: \"kubernetes.io/projected/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-kube-api-access-sqvth\") pod \"nova-metadata-0\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") " pod="openstack/nova-metadata-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.814158 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.835167 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 08:12:11 crc kubenswrapper[4861]: I0129 08:12:11.845279 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.013016 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.066284 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-dz5rn"] Jan 29 08:12:12 crc kubenswrapper[4861]: W0129 08:12:12.074762 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5ee4d88f_ac71_43c0_819e_3813cde88dc1.slice/crio-f4b34792f3a52276edc15904033e46ef6a74fed2453b8a0c45ba082b73c68ad0 WatchSource:0}: Error finding container f4b34792f3a52276edc15904033e46ef6a74fed2453b8a0c45ba082b73c68ad0: Status 404 returned error can't find the container with id f4b34792f3a52276edc15904033e46ef6a74fed2453b8a0c45ba082b73c68ad0 Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.165454 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.197333 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dz5rn" event={"ID":"5ee4d88f-ac71-43c0-819e-3813cde88dc1","Type":"ContainerStarted","Data":"f4b34792f3a52276edc15904033e46ef6a74fed2453b8a0c45ba082b73c68ad0"} Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.237486 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-b924h"] Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.238729 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.241485 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.241485 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.257857 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-b924h"] Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.272187 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.272543 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-config-data\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.272587 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mg6h2\" (UniqueName: \"kubernetes.io/projected/51592fcf-703f-426a-b6a0-26404449057b-kube-api-access-mg6h2\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.272694 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-scripts\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.320261 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.374065 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-scripts\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.374217 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.374254 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-config-data\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: 
\"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.374271 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mg6h2\" (UniqueName: \"kubernetes.io/projected/51592fcf-703f-426a-b6a0-26404449057b-kube-api-access-mg6h2\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.377093 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-scripts\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.378525 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-config-data\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.383280 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.401847 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mg6h2\" (UniqueName: \"kubernetes.io/projected/51592fcf-703f-426a-b6a0-26404449057b-kube-api-access-mg6h2\") pod \"nova-cell1-conductor-db-sync-b924h\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") " pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.419353 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.431178 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:12:12 crc kubenswrapper[4861]: W0129 08:12:12.434297 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42243cab_b4fa_4f6a_9f34_4efbeb978dd5.slice/crio-0e475048375c72448e7fe63e5a0d4e97f7d76f6cb96751b4dab690052203b502 WatchSource:0}: Error finding container 0e475048375c72448e7fe63e5a0d4e97f7d76f6cb96751b4dab690052203b502: Status 404 returned error can't find the container with id 0e475048375c72448e7fe63e5a0d4e97f7d76f6cb96751b4dab690052203b502 Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.563903 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-b924h" Jan 29 08:12:12 crc kubenswrapper[4861]: I0129 08:12:12.628822 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54785db6bc-7bmkn"] Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.034279 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-b924h"] Jan 29 08:12:13 crc kubenswrapper[4861]: W0129 08:12:13.045898 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51592fcf_703f_426a_b6a0_26404449057b.slice/crio-54b8f7fe39c1336becee98f8f203bacd74c8a6b0518bed98838a4239ab5600bf WatchSource:0}: Error finding container 54b8f7fe39c1336becee98f8f203bacd74c8a6b0518bed98838a4239ab5600bf: Status 404 returned error can't find the container with id 54b8f7fe39c1336becee98f8f203bacd74c8a6b0518bed98838a4239ab5600bf Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.205914 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42243cab-b4fa-4f6a-9f34-4efbeb978dd5","Type":"ContainerStarted","Data":"e5f6edb7def145915388ddf4a4b7fdc44f1081226977ef429fc965ba384abcba"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.206343 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42243cab-b4fa-4f6a-9f34-4efbeb978dd5","Type":"ContainerStarted","Data":"f1993a7dbcc29918446168ebfa5fc73df35235932eb52cab15a0144f50241f03"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.206353 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42243cab-b4fa-4f6a-9f34-4efbeb978dd5","Type":"ContainerStarted","Data":"0e475048375c72448e7fe63e5a0d4e97f7d76f6cb96751b4dab690052203b502"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.208882 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dz5rn" event={"ID":"5ee4d88f-ac71-43c0-819e-3813cde88dc1","Type":"ContainerStarted","Data":"babc54de6768e8244f831a4a6e7b4dfca2609331577eb241df28ee9f6b189630"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.210131 4861 generic.go:334] "Generic (PLEG): container finished" podID="d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" containerID="282a75b799ee1dbe75bd71f4c75d85c603c52882b4b759fa6b4390e90ff6e8c6" exitCode=0 Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.210573 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" event={"ID":"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9","Type":"ContainerDied","Data":"282a75b799ee1dbe75bd71f4c75d85c603c52882b4b759fa6b4390e90ff6e8c6"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.210599 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" event={"ID":"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9","Type":"ContainerStarted","Data":"ea6618d052b830a707fc7e4fff10038a266d07d620397b8901770d2934ff522c"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.214228 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"f15c9c0e-cc86-4134-909b-0f2618fc200a","Type":"ContainerStarted","Data":"281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.214267 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"f15c9c0e-cc86-4134-909b-0f2618fc200a","Type":"ContainerStarted","Data":"91acc593c22207b8921c107824a3611f980eb7bed464222a9d8d3ab14e2a1057"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.215065 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-b924h" event={"ID":"51592fcf-703f-426a-b6a0-26404449057b","Type":"ContainerStarted","Data":"54b8f7fe39c1336becee98f8f203bacd74c8a6b0518bed98838a4239ab5600bf"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.217675 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"65159619-fe95-4ca3-903b-45f7923ff326","Type":"ContainerStarted","Data":"00f18966cbbdffe9ca54744df540b0b01674a67555d4f476828e73ac3926c204"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.217721 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"65159619-fe95-4ca3-903b-45f7923ff326","Type":"ContainerStarted","Data":"b23e0f43d6c55fbbbd0b367065525a5576f21fd51f27999997bfd3bcf4fe99ca"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.237896 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.237866739 podStartE2EDuration="2.237866739s" podCreationTimestamp="2026-01-29 08:12:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:13.224772514 +0000 UTC m=+5824.896267091" watchObservedRunningTime="2026-01-29 08:12:13.237866739 +0000 UTC m=+5824.909361316" Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.241558 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e","Type":"ContainerStarted","Data":"4ce6d84461c1af92612b00ac764b674bc02cbfde7a1ba4727d00cd1ad0cc99a3"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.241616 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e","Type":"ContainerStarted","Data":"7d39854782ae22bb9734cbcb753438b5faef5b5cb30b26ee9797ec2acc4c8ad6"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.241627 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e","Type":"ContainerStarted","Data":"9537549c8421c8ba778cc5b861f03bbc1876379884a11c394c0f64a4f438040c"} Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.284010 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.283987781 podStartE2EDuration="2.283987781s" podCreationTimestamp="2026-01-29 08:12:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:13.265842754 +0000 UTC m=+5824.937337321" watchObservedRunningTime="2026-01-29 08:12:13.283987781 +0000 UTC m=+5824.955482338" Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.305235 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.30521588 podStartE2EDuration="2.30521588s" podCreationTimestamp="2026-01-29 08:12:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:13.299793977 +0000 UTC 
Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.337974 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-dz5rn" podStartSLOduration=2.337947931 podStartE2EDuration="2.337947931s" podCreationTimestamp="2026-01-29 08:12:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:13.318485059 +0000 UTC m=+5824.989979616" watchObservedRunningTime="2026-01-29 08:12:13.337947931 +0000 UTC m=+5825.009442488"
Jan 29 08:12:13 crc kubenswrapper[4861]: I0129 08:12:13.445556 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.44552252 podStartE2EDuration="2.44552252s" podCreationTimestamp="2026-01-29 08:12:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:13.396539391 +0000 UTC m=+5825.068033948" watchObservedRunningTime="2026-01-29 08:12:13.44552252 +0000 UTC m=+5825.117017077"
Jan 29 08:12:14 crc kubenswrapper[4861]: I0129 08:12:14.117348 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa"
Jan 29 08:12:14 crc kubenswrapper[4861]: E0129 08:12:14.117883 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:12:14 crc kubenswrapper[4861]: I0129 08:12:14.255877 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-b924h" event={"ID":"51592fcf-703f-426a-b6a0-26404449057b","Type":"ContainerStarted","Data":"191a977e818785883cdf0b687bec9c15efe4f2173dbbc09289de8ddfbb11217b"}
Jan 29 08:12:14 crc kubenswrapper[4861]: I0129 08:12:14.257849 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" event={"ID":"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9","Type":"ContainerStarted","Data":"d637967963a5e919f7aeade81fa19b4ecbd898f64b875ea830d9754fcb7a8d62"}
Jan 29 08:12:14 crc kubenswrapper[4861]: I0129 08:12:14.288320 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-b924h" podStartSLOduration=2.288287463 podStartE2EDuration="2.288287463s" podCreationTimestamp="2026-01-29 08:12:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:14.272145488 +0000 UTC m=+5825.943640055" watchObservedRunningTime="2026-01-29 08:12:14.288287463 +0000 UTC m=+5825.959782060"
Jan 29 08:12:14 crc kubenswrapper[4861]: I0129 08:12:14.302883 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" podStartSLOduration=3.302859166 podStartE2EDuration="3.302859166s" podCreationTimestamp="2026-01-29 08:12:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:14.293187292 +0000 UTC m=+5825.964681859" watchObservedRunningTime="2026-01-29 08:12:14.302859166 +0000 UTC m=+5825.974353733"
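On the CrashLoopBackOff error above: "back-off 5m0s" means the machine-config-daemon container has already hit the kubelet's maximum restart back-off. The log only confirms the 5m cap; assuming the usual kubelet policy of a 10s base delay doubled on each failed restart (an assumption, not something this log states), the schedule looks like the sketch below (Python; the helper name is illustrative):

    def crashloop_backoff(restarts: int, base: float = 10.0, cap: float = 300.0) -> float:
        """Kubelet-style restart back-off: base doubles per failed restart, capped (here 5m0s, as logged)."""
        return min(base * (2 ** restarts), cap)

    # 10s, 20s, 40s, 80s, 160s, then pinned at the 300s cap seen in the log.
    print([crashloop_backoff(n) for n in range(7)])  # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0, 300.0]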
Jan 29 08:12:15 crc kubenswrapper[4861]: I0129 08:12:15.265583 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn"
Jan 29 08:12:15 crc kubenswrapper[4861]: I0129 08:12:15.720262 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 29 08:12:15 crc kubenswrapper[4861]: I0129 08:12:15.720734 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="f15c9c0e-cc86-4134-909b-0f2618fc200a" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6" gracePeriod=30
Jan 29 08:12:15 crc kubenswrapper[4861]: I0129 08:12:15.737903 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:15 crc kubenswrapper[4861]: I0129 08:12:15.739170 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="42243cab-b4fa-4f6a-9f34-4efbeb978dd5" containerName="nova-metadata-log" containerID="cri-o://f1993a7dbcc29918446168ebfa5fc73df35235932eb52cab15a0144f50241f03" gracePeriod=30
Jan 29 08:12:15 crc kubenswrapper[4861]: I0129 08:12:15.739236 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="42243cab-b4fa-4f6a-9f34-4efbeb978dd5" containerName="nova-metadata-metadata" containerID="cri-o://e5f6edb7def145915388ddf4a4b7fdc44f1081226977ef429fc965ba384abcba" gracePeriod=30
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.276792 4861 generic.go:334] "Generic (PLEG): container finished" podID="42243cab-b4fa-4f6a-9f34-4efbeb978dd5" containerID="e5f6edb7def145915388ddf4a4b7fdc44f1081226977ef429fc965ba384abcba" exitCode=0
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.277124 4861 generic.go:334] "Generic (PLEG): container finished" podID="42243cab-b4fa-4f6a-9f34-4efbeb978dd5" containerID="f1993a7dbcc29918446168ebfa5fc73df35235932eb52cab15a0144f50241f03" exitCode=143
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.276859 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42243cab-b4fa-4f6a-9f34-4efbeb978dd5","Type":"ContainerDied","Data":"e5f6edb7def145915388ddf4a4b7fdc44f1081226977ef429fc965ba384abcba"}
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.277255 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42243cab-b4fa-4f6a-9f34-4efbeb978dd5","Type":"ContainerDied","Data":"f1993a7dbcc29918446168ebfa5fc73df35235932eb52cab15a0144f50241f03"}
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.277272 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42243cab-b4fa-4f6a-9f34-4efbeb978dd5","Type":"ContainerDied","Data":"0e475048375c72448e7fe63e5a0d4e97f7d76f6cb96751b4dab690052203b502"}
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.277285 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e475048375c72448e7fe63e5a0d4e97f7d76f6cb96751b4dab690052203b502"
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.325125 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
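A reading note on the two exit codes above: on "Killing container with a grace period" the runtime sends SIGTERM and waits up to gracePeriod (here 30s). A process that shuts down cleanly reports exitCode=0 (nova-metadata-metadata), while one terminated by the unhandled SIGTERM reports 128+15=143 (nova-metadata-log), per the standard 128+signal-number convention. A quick check of that mapping (plain Python, illustrative helper only):

    import signal

    def classify(exit_code: int) -> str:
        """Map a container exit code to a rough cause, per the 128+N signal convention."""
        if exit_code == 0:
            return "clean shutdown"
        if exit_code > 128:
            return f"killed by signal {signal.Signals(exit_code - 128).name}"
        return f"application error {exit_code}"

    print(classify(0))    # clean shutdown
    print(classify(143))  # killed by signal SIGTERM (128 + 15)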
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.363964 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqvth\" (UniqueName: \"kubernetes.io/projected/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-kube-api-access-sqvth\") pod \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") "
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.370461 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-kube-api-access-sqvth" (OuterVolumeSpecName: "kube-api-access-sqvth") pod "42243cab-b4fa-4f6a-9f34-4efbeb978dd5" (UID: "42243cab-b4fa-4f6a-9f34-4efbeb978dd5"). InnerVolumeSpecName "kube-api-access-sqvth". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.469336 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-config-data\") pod \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") "
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.469534 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-logs\") pod \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") "
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.469603 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-combined-ca-bundle\") pod \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\" (UID: \"42243cab-b4fa-4f6a-9f34-4efbeb978dd5\") "
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.470061 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqvth\" (UniqueName: \"kubernetes.io/projected/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-kube-api-access-sqvth\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.476432 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-logs" (OuterVolumeSpecName: "logs") pod "42243cab-b4fa-4f6a-9f34-4efbeb978dd5" (UID: "42243cab-b4fa-4f6a-9f34-4efbeb978dd5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.501231 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "42243cab-b4fa-4f6a-9f34-4efbeb978dd5" (UID: "42243cab-b4fa-4f6a-9f34-4efbeb978dd5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.514267 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-config-data" (OuterVolumeSpecName: "config-data") pod "42243cab-b4fa-4f6a-9f34-4efbeb978dd5" (UID: "42243cab-b4fa-4f6a-9f34-4efbeb978dd5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.571918 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.571956 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.571989 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42243cab-b4fa-4f6a-9f34-4efbeb978dd5-logs\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.814754 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:16 crc kubenswrapper[4861]: I0129 08:12:16.837682 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.010457 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.115568 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-config-data\") pod \"f15c9c0e-cc86-4134-909b-0f2618fc200a\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") "
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.115796 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69d77\" (UniqueName: \"kubernetes.io/projected/f15c9c0e-cc86-4134-909b-0f2618fc200a-kube-api-access-69d77\") pod \"f15c9c0e-cc86-4134-909b-0f2618fc200a\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") "
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.115839 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-combined-ca-bundle\") pod \"f15c9c0e-cc86-4134-909b-0f2618fc200a\" (UID: \"f15c9c0e-cc86-4134-909b-0f2618fc200a\") "
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.120323 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f15c9c0e-cc86-4134-909b-0f2618fc200a-kube-api-access-69d77" (OuterVolumeSpecName: "kube-api-access-69d77") pod "f15c9c0e-cc86-4134-909b-0f2618fc200a" (UID: "f15c9c0e-cc86-4134-909b-0f2618fc200a"). InnerVolumeSpecName "kube-api-access-69d77". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.139685 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f15c9c0e-cc86-4134-909b-0f2618fc200a" (UID: "f15c9c0e-cc86-4134-909b-0f2618fc200a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.152325 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-config-data" (OuterVolumeSpecName: "config-data") pod "f15c9c0e-cc86-4134-909b-0f2618fc200a" (UID: "f15c9c0e-cc86-4134-909b-0f2618fc200a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.217769 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69d77\" (UniqueName: \"kubernetes.io/projected/f15c9c0e-cc86-4134-909b-0f2618fc200a-kube-api-access-69d77\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.217809 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.217818 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f15c9c0e-cc86-4134-909b-0f2618fc200a-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.286808 4861 generic.go:334] "Generic (PLEG): container finished" podID="51592fcf-703f-426a-b6a0-26404449057b" containerID="191a977e818785883cdf0b687bec9c15efe4f2173dbbc09289de8ddfbb11217b" exitCode=0
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.286905 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-b924h" event={"ID":"51592fcf-703f-426a-b6a0-26404449057b","Type":"ContainerDied","Data":"191a977e818785883cdf0b687bec9c15efe4f2173dbbc09289de8ddfbb11217b"}
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.289095 4861 generic.go:334] "Generic (PLEG): container finished" podID="f15c9c0e-cc86-4134-909b-0f2618fc200a" containerID="281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6" exitCode=0
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.289167 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.289521 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"f15c9c0e-cc86-4134-909b-0f2618fc200a","Type":"ContainerDied","Data":"281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6"}
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.289601 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"f15c9c0e-cc86-4134-909b-0f2618fc200a","Type":"ContainerDied","Data":"91acc593c22207b8921c107824a3611f980eb7bed464222a9d8d3ab14e2a1057"}
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.289612 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
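Each volume in the teardown above moves through the same three reconciler stages: "operationExecutor.UnmountVolume started" (reconciler_common.go:159), "UnmountVolume.TearDown succeeded" (operation_generator.go:803), then "Volume detached" (reconciler_common.go:293). A small sketch (Python; the substring patterns are ad hoc, matched against the escaped klog quoting in these lines, not a kubelet API) that folds a log into per-volume stage sets, useful for spotting a volume that never reaches "detached":

    import re
    from collections import defaultdict

    # One pattern per reconciler stage; each captures the outer volume name.
    PATTERNS = {
        "started":  re.compile(r'UnmountVolume started for volume \\"([^"\\]+)\\"'),
        "torndown": re.compile(r'OuterVolumeSpecName: "([^"]+)"'),
        "detached": re.compile(r'Volume detached for volume \\"([^"\\]+)\\"'),
    }

    def teardown_progress(lines):
        seen = defaultdict(set)  # volume name -> stages observed
        for line in lines:
            for stage, pat in PATTERNS.items():
                if m := pat.search(line):
                    seen[m.group(1)].add(stage)
        # Volumes that began tearing down but never reached "detached".
        return [v for v, s in seen.items() if "started" in s and "detached" not in s]

    # teardown_progress(open("kubelet.log")) -> [] here; every unmount completes.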
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.289638 4861 scope.go:117] "RemoveContainer" containerID="281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.326576 4861 scope.go:117] "RemoveContainer" containerID="281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6"
Jan 29 08:12:17 crc kubenswrapper[4861]: E0129 08:12:17.327021 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6\": container with ID starting with 281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6 not found: ID does not exist" containerID="281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.327084 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6"} err="failed to get container status \"281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6\": rpc error: code = NotFound desc = could not find container \"281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6\": container with ID starting with 281ba1dee96bf853010036f54ce2d36a858a639d47887d1d57bac1dcdbacfeb6 not found: ID does not exist"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.332308 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.348150 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.358218 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.367927 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.378810 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:17 crc kubenswrapper[4861]: E0129 08:12:17.379355 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42243cab-b4fa-4f6a-9f34-4efbeb978dd5" containerName="nova-metadata-log"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.379388 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="42243cab-b4fa-4f6a-9f34-4efbeb978dd5" containerName="nova-metadata-log"
Jan 29 08:12:17 crc kubenswrapper[4861]: E0129 08:12:17.379427 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f15c9c0e-cc86-4134-909b-0f2618fc200a" containerName="nova-cell1-novncproxy-novncproxy"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.379440 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f15c9c0e-cc86-4134-909b-0f2618fc200a" containerName="nova-cell1-novncproxy-novncproxy"
Jan 29 08:12:17 crc kubenswrapper[4861]: E0129 08:12:17.379460 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42243cab-b4fa-4f6a-9f34-4efbeb978dd5" containerName="nova-metadata-metadata"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.379468 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="42243cab-b4fa-4f6a-9f34-4efbeb978dd5" containerName="nova-metadata-metadata"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.379722 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f15c9c0e-cc86-4134-909b-0f2618fc200a" containerName="nova-cell1-novncproxy-novncproxy"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.379750 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="42243cab-b4fa-4f6a-9f34-4efbeb978dd5" containerName="nova-metadata-log"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.379769 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="42243cab-b4fa-4f6a-9f34-4efbeb978dd5" containerName="nova-metadata-metadata"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.381018 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.385491 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.385749 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.385983 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.392702 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.394687 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.398588 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.398703 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.398868 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.399940 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.523357 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmkln\" (UniqueName: \"kubernetes.io/projected/9346b8cf-2d39-4674-a59b-0b6bb04cab33-kube-api-access-kmkln\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.523437 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.523470 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.523782 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-config-data\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.523851 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.523899 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59b2d72c-2acc-43b1-884d-3c5426f9e83e-logs\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.523922 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.523955 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.524042 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.524148 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfqwj\" (UniqueName: \"kubernetes.io/projected/59b2d72c-2acc-43b1-884d-3c5426f9e83e-kube-api-access-gfqwj\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.625846 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.625967 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfqwj\" (UniqueName: \"kubernetes.io/projected/59b2d72c-2acc-43b1-884d-3c5426f9e83e-kube-api-access-gfqwj\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.626001 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmkln\" (UniqueName: \"kubernetes.io/projected/9346b8cf-2d39-4674-a59b-0b6bb04cab33-kube-api-access-kmkln\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.626046 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.626100 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.626157 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-config-data\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.626184 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.626205 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59b2d72c-2acc-43b1-884d-3c5426f9e83e-logs\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.626221 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.626244 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.627648 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59b2d72c-2acc-43b1-884d-3c5426f9e83e-logs\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.630254 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.630377 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.630603 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-config-data\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.631128 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.631601 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.631960 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.632918 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9346b8cf-2d39-4674-a59b-0b6bb04cab33-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.641970 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmkln\" (UniqueName: \"kubernetes.io/projected/9346b8cf-2d39-4674-a59b-0b6bb04cab33-kube-api-access-kmkln\") pod \"nova-cell1-novncproxy-0\" (UID: \"9346b8cf-2d39-4674-a59b-0b6bb04cab33\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.643373 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfqwj\" (UniqueName: \"kubernetes.io/projected/59b2d72c-2acc-43b1-884d-3c5426f9e83e-kube-api-access-gfqwj\") pod \"nova-metadata-0\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.706501 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 29 08:12:17 crc kubenswrapper[4861]: I0129 08:12:17.720250 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.255272 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:18 crc kubenswrapper[4861]: W0129 08:12:18.263742 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59b2d72c_2acc_43b1_884d_3c5426f9e83e.slice/crio-fc4c6873de77446d0cd7e036217c4f6d4f8974941aa0c0b905e56b315aa65815 WatchSource:0}: Error finding container fc4c6873de77446d0cd7e036217c4f6d4f8974941aa0c0b905e56b315aa65815: Status 404 returned error can't find the container with id fc4c6873de77446d0cd7e036217c4f6d4f8974941aa0c0b905e56b315aa65815
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.304374 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"59b2d72c-2acc-43b1-884d-3c5426f9e83e","Type":"ContainerStarted","Data":"fc4c6873de77446d0cd7e036217c4f6d4f8974941aa0c0b905e56b315aa65815"}
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.310631 4861 generic.go:334] "Generic (PLEG): container finished" podID="5ee4d88f-ac71-43c0-819e-3813cde88dc1" containerID="babc54de6768e8244f831a4a6e7b4dfca2609331577eb241df28ee9f6b189630" exitCode=0
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.310744 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dz5rn" event={"ID":"5ee4d88f-ac71-43c0-819e-3813cde88dc1","Type":"ContainerDied","Data":"babc54de6768e8244f831a4a6e7b4dfca2609331577eb241df28ee9f6b189630"}
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.312207 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 29 08:12:18 crc kubenswrapper[4861]: W0129 08:12:18.312413 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9346b8cf_2d39_4674_a59b_0b6bb04cab33.slice/crio-873ff69c66c21da340e4ec23f9addac610570b64919dedb538a536dc1550eae1 WatchSource:0}: Error finding container 873ff69c66c21da340e4ec23f9addac610570b64919dedb538a536dc1550eae1: Status 404 returned error can't find the container with id 873ff69c66c21da340e4ec23f9addac610570b64919dedb538a536dc1550eae1
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.630813 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-b924h"
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.750371 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-combined-ca-bundle\") pod \"51592fcf-703f-426a-b6a0-26404449057b\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") "
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.751654 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-config-data\") pod \"51592fcf-703f-426a-b6a0-26404449057b\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") "
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.751712 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg6h2\" (UniqueName: \"kubernetes.io/projected/51592fcf-703f-426a-b6a0-26404449057b-kube-api-access-mg6h2\") pod \"51592fcf-703f-426a-b6a0-26404449057b\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") "
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.751893 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-scripts\") pod \"51592fcf-703f-426a-b6a0-26404449057b\" (UID: \"51592fcf-703f-426a-b6a0-26404449057b\") "
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.756278 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51592fcf-703f-426a-b6a0-26404449057b-kube-api-access-mg6h2" (OuterVolumeSpecName: "kube-api-access-mg6h2") pod "51592fcf-703f-426a-b6a0-26404449057b" (UID: "51592fcf-703f-426a-b6a0-26404449057b"). InnerVolumeSpecName "kube-api-access-mg6h2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.757562 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-scripts" (OuterVolumeSpecName: "scripts") pod "51592fcf-703f-426a-b6a0-26404449057b" (UID: "51592fcf-703f-426a-b6a0-26404449057b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.782946 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "51592fcf-703f-426a-b6a0-26404449057b" (UID: "51592fcf-703f-426a-b6a0-26404449057b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.795214 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-config-data" (OuterVolumeSpecName: "config-data") pod "51592fcf-703f-426a-b6a0-26404449057b" (UID: "51592fcf-703f-426a-b6a0-26404449057b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.854706 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.854752 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg6h2\" (UniqueName: \"kubernetes.io/projected/51592fcf-703f-426a-b6a0-26404449057b-kube-api-access-mg6h2\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.854772 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:18 crc kubenswrapper[4861]: I0129 08:12:18.854787 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51592fcf-703f-426a-b6a0-26404449057b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.130543 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42243cab-b4fa-4f6a-9f34-4efbeb978dd5" path="/var/lib/kubelet/pods/42243cab-b4fa-4f6a-9f34-4efbeb978dd5/volumes"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.131687 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f15c9c0e-cc86-4134-909b-0f2618fc200a" path="/var/lib/kubelet/pods/f15c9c0e-cc86-4134-909b-0f2618fc200a/volumes"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.325510 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"59b2d72c-2acc-43b1-884d-3c5426f9e83e","Type":"ContainerStarted","Data":"1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42"}
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.330542 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"59b2d72c-2acc-43b1-884d-3c5426f9e83e","Type":"ContainerStarted","Data":"0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f"}
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.334212 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-b924h" event={"ID":"51592fcf-703f-426a-b6a0-26404449057b","Type":"ContainerDied","Data":"54b8f7fe39c1336becee98f8f203bacd74c8a6b0518bed98838a4239ab5600bf"}
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.334266 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54b8f7fe39c1336becee98f8f203bacd74c8a6b0518bed98838a4239ab5600bf"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.334346 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-b924h"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.337867 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9346b8cf-2d39-4674-a59b-0b6bb04cab33","Type":"ContainerStarted","Data":"93e8d6da4b4ef6738334f2d90d712017cce0702989cafad0474db69c39cddb92"}
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.337993 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9346b8cf-2d39-4674-a59b-0b6bb04cab33","Type":"ContainerStarted","Data":"873ff69c66c21da340e4ec23f9addac610570b64919dedb538a536dc1550eae1"}
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.356795 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.356772199 podStartE2EDuration="2.356772199s" podCreationTimestamp="2026-01-29 08:12:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:19.348899902 +0000 UTC m=+5831.020394519" watchObservedRunningTime="2026-01-29 08:12:19.356772199 +0000 UTC m=+5831.028266756"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.414604 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.414568329 podStartE2EDuration="2.414568329s" podCreationTimestamp="2026-01-29 08:12:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:19.384735534 +0000 UTC m=+5831.056230111" watchObservedRunningTime="2026-01-29 08:12:19.414568329 +0000 UTC m=+5831.086062886"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.425337 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 29 08:12:19 crc kubenswrapper[4861]: E0129 08:12:19.425690 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51592fcf-703f-426a-b6a0-26404449057b" containerName="nova-cell1-conductor-db-sync"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.425702 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="51592fcf-703f-426a-b6a0-26404449057b" containerName="nova-cell1-conductor-db-sync"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.425909 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="51592fcf-703f-426a-b6a0-26404449057b" containerName="nova-cell1-conductor-db-sync"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.426710 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.426788 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.439450 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.574404 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.574692 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.575046 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4452h\" (UniqueName: \"kubernetes.io/projected/a83392de-0747-452a-a609-8e4c63b0f13e-kube-api-access-4452h\") pod \"nova-cell1-conductor-0\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.676702 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4452h\" (UniqueName: \"kubernetes.io/projected/a83392de-0747-452a-a609-8e4c63b0f13e-kube-api-access-4452h\") pod \"nova-cell1-conductor-0\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.676797 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.676856 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.688239 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.692903 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.695713 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4452h\" (UniqueName: \"kubernetes.io/projected/a83392de-0747-452a-a609-8e4c63b0f13e-kube-api-access-4452h\") pod \"nova-cell1-conductor-0\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.761756 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dz5rn"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.762486 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.890038 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-config-data\") pod \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") "
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.890597 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-combined-ca-bundle\") pod \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") "
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.890639 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zbmz\" (UniqueName: \"kubernetes.io/projected/5ee4d88f-ac71-43c0-819e-3813cde88dc1-kube-api-access-9zbmz\") pod \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") "
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.890675 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-scripts\") pod \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\" (UID: \"5ee4d88f-ac71-43c0-819e-3813cde88dc1\") "
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.896180 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ee4d88f-ac71-43c0-819e-3813cde88dc1-kube-api-access-9zbmz" (OuterVolumeSpecName: "kube-api-access-9zbmz") pod "5ee4d88f-ac71-43c0-819e-3813cde88dc1" (UID: "5ee4d88f-ac71-43c0-819e-3813cde88dc1"). InnerVolumeSpecName "kube-api-access-9zbmz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.912722 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-scripts" (OuterVolumeSpecName: "scripts") pod "5ee4d88f-ac71-43c0-819e-3813cde88dc1" (UID: "5ee4d88f-ac71-43c0-819e-3813cde88dc1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.918851 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-config-data" (OuterVolumeSpecName: "config-data") pod "5ee4d88f-ac71-43c0-819e-3813cde88dc1" (UID: "5ee4d88f-ac71-43c0-819e-3813cde88dc1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.928690 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ee4d88f-ac71-43c0-819e-3813cde88dc1" (UID: "5ee4d88f-ac71-43c0-819e-3813cde88dc1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.993646 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.993698 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.993720 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zbmz\" (UniqueName: \"kubernetes.io/projected/5ee4d88f-ac71-43c0-819e-3813cde88dc1-kube-api-access-9zbmz\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:19 crc kubenswrapper[4861]: I0129 08:12:19.993738 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ee4d88f-ac71-43c0-819e-3813cde88dc1-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:20 crc kubenswrapper[4861]: I0129 08:12:20.266827 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 29 08:12:20 crc kubenswrapper[4861]: I0129 08:12:20.356965 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a83392de-0747-452a-a609-8e4c63b0f13e","Type":"ContainerStarted","Data":"7c85ece55532297595402ce13987a386451afd5ec7cc9fea3ad3216ebe962100"}
Jan 29 08:12:20 crc kubenswrapper[4861]: I0129 08:12:20.362905 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dz5rn"
Jan 29 08:12:20 crc kubenswrapper[4861]: I0129 08:12:20.363774 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dz5rn" event={"ID":"5ee4d88f-ac71-43c0-819e-3813cde88dc1","Type":"ContainerDied","Data":"f4b34792f3a52276edc15904033e46ef6a74fed2453b8a0c45ba082b73c68ad0"}
Jan 29 08:12:20 crc kubenswrapper[4861]: I0129 08:12:20.363936 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4b34792f3a52276edc15904033e46ef6a74fed2453b8a0c45ba082b73c68ad0"
Jan 29 08:12:20 crc kubenswrapper[4861]: I0129 08:12:20.605061 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 08:12:20 crc kubenswrapper[4861]: I0129 08:12:20.605471 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="65159619-fe95-4ca3-903b-45f7923ff326" containerName="nova-scheduler-scheduler" containerID="cri-o://00f18966cbbdffe9ca54744df540b0b01674a67555d4f476828e73ac3926c204" gracePeriod=30
Jan 29 08:12:20 crc kubenswrapper[4861]: I0129 08:12:20.620752 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 29 08:12:20 crc kubenswrapper[4861]: I0129 08:12:20.621161 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" containerName="nova-api-log" containerID="cri-o://7d39854782ae22bb9734cbcb753438b5faef5b5cb30b26ee9797ec2acc4c8ad6" gracePeriod=30
Jan 29 08:12:20 crc kubenswrapper[4861]: I0129 08:12:20.621398 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" containerName="nova-api-api" containerID="cri-o://4ce6d84461c1af92612b00ac764b674bc02cbfde7a1ba4727d00cd1ad0cc99a3" gracePeriod=30
Jan 29 08:12:20 crc kubenswrapper[4861]: I0129 08:12:20.644055 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.373582 4861 generic.go:334] "Generic (PLEG): container finished" podID="534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" containerID="4ce6d84461c1af92612b00ac764b674bc02cbfde7a1ba4727d00cd1ad0cc99a3" exitCode=0
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.374029 4861 generic.go:334] "Generic (PLEG): container finished" podID="534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" containerID="7d39854782ae22bb9734cbcb753438b5faef5b5cb30b26ee9797ec2acc4c8ad6" exitCode=143
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.373678 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e","Type":"ContainerDied","Data":"4ce6d84461c1af92612b00ac764b674bc02cbfde7a1ba4727d00cd1ad0cc99a3"}
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.374174 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e","Type":"ContainerDied","Data":"7d39854782ae22bb9734cbcb753438b5faef5b5cb30b26ee9797ec2acc4c8ad6"}
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.376301 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a83392de-0747-452a-a609-8e4c63b0f13e","Type":"ContainerStarted","Data":"f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f"}
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.376412 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
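The scheduler/api teardown above repeats the same choreography seen earlier for nova-metadata-0 and nova-cell1-novncproxy-0: SyncLoop DELETE from the API, "Killing container with a grace period", PLEG ContainerDied events, volume teardown, SyncLoop REMOVE, and, for pods being replaced, a fresh SyncLoop ADD. A compact sketch (Python; the milestone strings are lifted from the messages above, the helper is illustrative) that orders those milestones per pod from a raw log:

    import re

    MILESTONES = ["SyncLoop ADD", "SyncLoop DELETE", "Killing container with a grace period",
                  "ContainerDied", "SyncLoop REMOVE"]
    POD = re.compile(r'pods?=\["?([^"\]]+)"?\]|pod="([^"]+)"')

    def lifecycle(lines):
        timeline = []  # (pod, milestone) tuples in log order
        for line in lines:
            for mark in MILESTONES:
                if mark in line:
                    if m := POD.search(line):
                        timeline.append((m.group(1) or m.group(2), mark))
                    break
        return timeline

    # lifecycle(open("kubelet.log")) -> e.g. ("openstack/nova-api-0", "SyncLoop DELETE"), ...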
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.376412 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.376886 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="59b2d72c-2acc-43b1-884d-3c5426f9e83e" containerName="nova-metadata-log" containerID="cri-o://0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f" gracePeriod=30
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.376871 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="59b2d72c-2acc-43b1-884d-3c5426f9e83e" containerName="nova-metadata-metadata" containerID="cri-o://1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42" gracePeriod=30
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.403261 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.403243648 podStartE2EDuration="2.403243648s" podCreationTimestamp="2026-01-29 08:12:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:21.396893851 +0000 UTC m=+5833.068388408" watchObservedRunningTime="2026-01-29 08:12:21.403243648 +0000 UTC m=+5833.074738205"
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.675252 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.845730 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftkgl\" (UniqueName: \"kubernetes.io/projected/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-kube-api-access-ftkgl\") pod \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") "
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.845823 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-combined-ca-bundle\") pod \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") "
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.845867 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-config-data\") pod \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") "
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.845951 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-logs\") pod \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\" (UID: \"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e\") "
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.846708 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-logs" (OuterVolumeSpecName: "logs") pod "534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" (UID: "534f40f9-4c1b-4795-a0ee-8ae9eaefa81e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.853462 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-kube-api-access-ftkgl" (OuterVolumeSpecName: "kube-api-access-ftkgl") pod "534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" (UID: "534f40f9-4c1b-4795-a0ee-8ae9eaefa81e"). InnerVolumeSpecName "kube-api-access-ftkgl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.879255 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" (UID: "534f40f9-4c1b-4795-a0ee-8ae9eaefa81e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.887135 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-config-data" (OuterVolumeSpecName: "config-data") pod "534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" (UID: "534f40f9-4c1b-4795-a0ee-8ae9eaefa81e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.948096 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.948203 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.948282 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-logs\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.948341 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftkgl\" (UniqueName: \"kubernetes.io/projected/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e-kube-api-access-ftkgl\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:21 crc kubenswrapper[4861]: I0129 08:12:21.995516 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.015392 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.104765 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59d96fd4d9-s6dl7"]
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.104979 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7" podUID="347f68d9-759b-42c9-b01c-1bb2b5eccdd2" containerName="dnsmasq-dns" containerID="cri-o://4a9bd686ddf0622bd3fe9dc2a8179ef32233f197ea64acdb6017f62200484eb5" gracePeriod=10
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.151739 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-nova-metadata-tls-certs\") pod \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") "
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.151907 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfqwj\" (UniqueName: \"kubernetes.io/projected/59b2d72c-2acc-43b1-884d-3c5426f9e83e-kube-api-access-gfqwj\") pod \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") "
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.151949 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-config-data\") pod \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") "
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.152059 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-combined-ca-bundle\") pod \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") "
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.152248 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59b2d72c-2acc-43b1-884d-3c5426f9e83e-logs\") pod \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\" (UID: \"59b2d72c-2acc-43b1-884d-3c5426f9e83e\") "
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.152697 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59b2d72c-2acc-43b1-884d-3c5426f9e83e-logs" (OuterVolumeSpecName: "logs") pod "59b2d72c-2acc-43b1-884d-3c5426f9e83e" (UID: "59b2d72c-2acc-43b1-884d-3c5426f9e83e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.153389 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59b2d72c-2acc-43b1-884d-3c5426f9e83e-logs\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.158249 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59b2d72c-2acc-43b1-884d-3c5426f9e83e-kube-api-access-gfqwj" (OuterVolumeSpecName: "kube-api-access-gfqwj") pod "59b2d72c-2acc-43b1-884d-3c5426f9e83e" (UID: "59b2d72c-2acc-43b1-884d-3c5426f9e83e"). InnerVolumeSpecName "kube-api-access-gfqwj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.182118 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-config-data" (OuterVolumeSpecName: "config-data") pod "59b2d72c-2acc-43b1-884d-3c5426f9e83e" (UID: "59b2d72c-2acc-43b1-884d-3c5426f9e83e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.185417 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "59b2d72c-2acc-43b1-884d-3c5426f9e83e" (UID: "59b2d72c-2acc-43b1-884d-3c5426f9e83e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
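[annotation] The "operationExecutor.UnmountVolume started ... UnmountVolume.TearDown succeeded ... Volume detached" sequence above comes from the kubelet's volume reconciler, which repeatedly diffs the desired set of volumes (from pod specs) against the actually mounted set and issues mount or unmount operations to close the gap. A toy Go sketch of that diff loop; the types and names are illustrative assumptions, not kubelet's own:

package main

import "fmt"

// reconcile diffs desired against actual and returns the operations a
// kubelet-style volume manager would start: mounts for missing volumes,
// unmounts for volumes no pod references anymore. Map iteration order is
// unspecified, so output order varies run to run.
func reconcile(desired, actual map[string]bool) (mounts, unmounts []string) {
	for v := range desired {
		if !actual[v] {
			mounts = append(mounts, v)
		}
	}
	for v := range actual {
		if !desired[v] {
			unmounts = append(unmounts, v)
		}
	}
	return
}

func main() {
	desired := map[string]bool{"config-data": true, "logs": true, "combined-ca-bundle": true}
	actual := map[string]bool{"logs": true, "scripts": true} // "scripts" belonged to a deleted pod
	m, u := reconcile(desired, actual)
	fmt.Println("MountVolume started for:", m)   // config-data, combined-ca-bundle
	fmt.Println("UnmountVolume started for:", u) // scripts
}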
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.212512 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "59b2d72c-2acc-43b1-884d-3c5426f9e83e" (UID: "59b2d72c-2acc-43b1-884d-3c5426f9e83e"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.256299 4861 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.256365 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfqwj\" (UniqueName: \"kubernetes.io/projected/59b2d72c-2acc-43b1-884d-3c5426f9e83e-kube-api-access-gfqwj\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.256376 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.256386 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59b2d72c-2acc-43b1-884d-3c5426f9e83e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.387924 4861 generic.go:334] "Generic (PLEG): container finished" podID="59b2d72c-2acc-43b1-884d-3c5426f9e83e" containerID="1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42" exitCode=0
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.388276 4861 generic.go:334] "Generic (PLEG): container finished" podID="59b2d72c-2acc-43b1-884d-3c5426f9e83e" containerID="0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f" exitCode=143
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.387986 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.388006 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"59b2d72c-2acc-43b1-884d-3c5426f9e83e","Type":"ContainerDied","Data":"1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42"}
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.389446 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"59b2d72c-2acc-43b1-884d-3c5426f9e83e","Type":"ContainerDied","Data":"0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f"}
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.389459 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"59b2d72c-2acc-43b1-884d-3c5426f9e83e","Type":"ContainerDied","Data":"fc4c6873de77446d0cd7e036217c4f6d4f8974941aa0c0b905e56b315aa65815"}
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.389478 4861 scope.go:117] "RemoveContainer" containerID="1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.394040 4861 generic.go:334] "Generic (PLEG): container finished" podID="347f68d9-759b-42c9-b01c-1bb2b5eccdd2" containerID="4a9bd686ddf0622bd3fe9dc2a8179ef32233f197ea64acdb6017f62200484eb5" exitCode=0
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.394104 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7" event={"ID":"347f68d9-759b-42c9-b01c-1bb2b5eccdd2","Type":"ContainerDied","Data":"4a9bd686ddf0622bd3fe9dc2a8179ef32233f197ea64acdb6017f62200484eb5"}
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.395721 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"534f40f9-4c1b-4795-a0ee-8ae9eaefa81e","Type":"ContainerDied","Data":"9537549c8421c8ba778cc5b861f03bbc1876379884a11c394c0f64a4f438040c"}
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.395751 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.428020 4861 scope.go:117] "RemoveContainer" containerID="0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.453136 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.461358 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.473093 4861 scope.go:117] "RemoveContainer" containerID="1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42"
Jan 29 08:12:22 crc kubenswrapper[4861]: E0129 08:12:22.475217 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42\": container with ID starting with 1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42 not found: ID does not exist" containerID="1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.475257 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42"} err="failed to get container status \"1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42\": rpc error: code = NotFound desc = could not find container \"1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42\": container with ID starting with 1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42 not found: ID does not exist"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.475286 4861 scope.go:117] "RemoveContainer" containerID="0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f"
Jan 29 08:12:22 crc kubenswrapper[4861]: E0129 08:12:22.479364 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f\": container with ID starting with 0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f not found: ID does not exist" containerID="0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.479403 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f"} err="failed to get container status \"0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f\": rpc error: code = NotFound desc = could not find container \"0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f\": container with ID starting with 0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f not found: ID does not exist"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.479432 4861 scope.go:117] "RemoveContainer" containerID="1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.480204 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42"} err="failed to get container status \"1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42\": rpc error: code = NotFound desc = could not find container \"1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42\": container with ID starting with 1ae962a980b95385277ec5ace55602b3b739a908201fd613908d264250d1bc42 not found: ID does not exist"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.480251 4861 scope.go:117] "RemoveContainer" containerID="0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.481361 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f"} err="failed to get container status \"0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f\": rpc error: code = NotFound desc = could not find container \"0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f\": container with ID starting with 0d287fdaf8e611f017efb12ee2d49dd2abcb323fc877971948b92f6bc336681f not found: ID does not exist"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.481422 4861 scope.go:117] "RemoveContainer" containerID="4ce6d84461c1af92612b00ac764b674bc02cbfde7a1ba4727d00cd1ad0cc99a3"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.490307 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.502172 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.513924 4861 scope.go:117] "RemoveContainer" containerID="7d39854782ae22bb9734cbcb753438b5faef5b5cb30b26ee9797ec2acc4c8ad6"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.535541 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:22 crc kubenswrapper[4861]: E0129 08:12:22.536132 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ee4d88f-ac71-43c0-819e-3813cde88dc1" containerName="nova-manage"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.536156 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ee4d88f-ac71-43c0-819e-3813cde88dc1" containerName="nova-manage"
Jan 29 08:12:22 crc kubenswrapper[4861]: E0129 08:12:22.536180 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" containerName="nova-api-api"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.536187 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" containerName="nova-api-api"
Jan 29 08:12:22 crc kubenswrapper[4861]: E0129 08:12:22.536201 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" containerName="nova-api-log"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.536207 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" containerName="nova-api-log"
Jan 29 08:12:22 crc kubenswrapper[4861]: E0129 08:12:22.536222 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59b2d72c-2acc-43b1-884d-3c5426f9e83e" containerName="nova-metadata-metadata"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.536228 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="59b2d72c-2acc-43b1-884d-3c5426f9e83e" containerName="nova-metadata-metadata"
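[annotation] The RemoveContainer / "NotFound ... ID does not exist" exchange above is an idempotent-delete pattern: once the first removal succeeds, retried status lookups return NotFound and the kubelet treats that as already-done rather than as a failure. A small stdlib Go sketch of the same idea, with a hypothetical in-memory store standing in for the CRI runtime:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("container not found: ID does not exist")

// store is a stand-in for the container runtime's state.
type store map[string]struct{}

func (s store) remove(id string) error {
	if _, ok := s[id]; !ok {
		return errNotFound
	}
	delete(s, id)
	return nil
}

// removeIdempotent treats NotFound as success, the way repeated
// RemoveContainer calls are tolerated in the log above.
func removeIdempotent(s store, id string) error {
	if err := s.remove(id); err != nil && !errors.Is(err, errNotFound) {
		return err
	}
	return nil // removed now, or already gone: either way, done
}

func main() {
	s := store{"1ae962a9": {}}
	fmt.Println(removeIdempotent(s, "1ae962a9")) // <nil>: removed
	fmt.Println(removeIdempotent(s, "1ae962a9")) // <nil>: NotFound tolerated
}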
Jan 29 08:12:22 crc kubenswrapper[4861]: E0129 08:12:22.536247 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59b2d72c-2acc-43b1-884d-3c5426f9e83e" containerName="nova-metadata-log"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.536255 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="59b2d72c-2acc-43b1-884d-3c5426f9e83e" containerName="nova-metadata-log"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.536416 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="59b2d72c-2acc-43b1-884d-3c5426f9e83e" containerName="nova-metadata-log"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.536426 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="59b2d72c-2acc-43b1-884d-3c5426f9e83e" containerName="nova-metadata-metadata"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.536448 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ee4d88f-ac71-43c0-819e-3813cde88dc1" containerName="nova-manage"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.536460 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" containerName="nova-api-log"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.536489 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" containerName="nova-api-api"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.538353 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.541627 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.541844 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.547043 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.553175 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.582284 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 29 08:12:22 crc kubenswrapper[4861]: E0129 08:12:22.582694 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="347f68d9-759b-42c9-b01c-1bb2b5eccdd2" containerName="init"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.582711 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="347f68d9-759b-42c9-b01c-1bb2b5eccdd2" containerName="init"
Jan 29 08:12:22 crc kubenswrapper[4861]: E0129 08:12:22.582728 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="347f68d9-759b-42c9-b01c-1bb2b5eccdd2" containerName="dnsmasq-dns"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.582735 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="347f68d9-759b-42c9-b01c-1bb2b5eccdd2" containerName="dnsmasq-dns"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.582927 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="347f68d9-759b-42c9-b01c-1bb2b5eccdd2" containerName="dnsmasq-dns"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.583913 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.587729 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.591452 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.661639 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-nb\") pod \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") "
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.661717 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-sb\") pod \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") "
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.661905 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nr9r\" (UniqueName: \"kubernetes.io/projected/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-kube-api-access-5nr9r\") pod \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") "
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.661946 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-config\") pod \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") "
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.662005 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-dns-svc\") pod \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\" (UID: \"347f68d9-759b-42c9-b01c-1bb2b5eccdd2\") "
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.662361 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.662394 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0"
Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.662422 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frjdm\" (UniqueName: \"kubernetes.io/projected/00972d44-1efa-46f6-9a32-84d3af5919d8-kube-api-access-frjdm\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0"
\"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.662507 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-logs\") pod \"nova-api-0\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.662781 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-config-data\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.662875 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vl6v5\" (UniqueName: \"kubernetes.io/projected/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-kube-api-access-vl6v5\") pod \"nova-api-0\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.662983 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00972d44-1efa-46f6-9a32-84d3af5919d8-logs\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.663014 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-config-data\") pod \"nova-api-0\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.666097 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-kube-api-access-5nr9r" (OuterVolumeSpecName: "kube-api-access-5nr9r") pod "347f68d9-759b-42c9-b01c-1bb2b5eccdd2" (UID: "347f68d9-759b-42c9-b01c-1bb2b5eccdd2"). InnerVolumeSpecName "kube-api-access-5nr9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.708300 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-config" (OuterVolumeSpecName: "config") pod "347f68d9-759b-42c9-b01c-1bb2b5eccdd2" (UID: "347f68d9-759b-42c9-b01c-1bb2b5eccdd2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.708960 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "347f68d9-759b-42c9-b01c-1bb2b5eccdd2" (UID: "347f68d9-759b-42c9-b01c-1bb2b5eccdd2"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.720637 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.721528 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "347f68d9-759b-42c9-b01c-1bb2b5eccdd2" (UID: "347f68d9-759b-42c9-b01c-1bb2b5eccdd2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.722875 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "347f68d9-759b-42c9-b01c-1bb2b5eccdd2" (UID: "347f68d9-759b-42c9-b01c-1bb2b5eccdd2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764486 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764543 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764578 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frjdm\" (UniqueName: \"kubernetes.io/projected/00972d44-1efa-46f6-9a32-84d3af5919d8-kube-api-access-frjdm\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764633 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764675 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-logs\") pod \"nova-api-0\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764744 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-config-data\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764782 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vl6v5\" (UniqueName: \"kubernetes.io/projected/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-kube-api-access-vl6v5\") pod \"nova-api-0\" (UID: 
\"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764833 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00972d44-1efa-46f6-9a32-84d3af5919d8-logs\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764857 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-config-data\") pod \"nova-api-0\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764968 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764982 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.764995 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nr9r\" (UniqueName: \"kubernetes.io/projected/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-kube-api-access-5nr9r\") on node \"crc\" DevicePath \"\"" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.765008 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.765021 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/347f68d9-759b-42c9-b01c-1bb2b5eccdd2-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.765450 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-logs\") pod \"nova-api-0\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.768640 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.769314 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.769528 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00972d44-1efa-46f6-9a32-84d3af5919d8-logs\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.770142 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-config-data\") pod \"nova-api-0\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.772919 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-config-data\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.780323 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.795731 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frjdm\" (UniqueName: \"kubernetes.io/projected/00972d44-1efa-46f6-9a32-84d3af5919d8-kube-api-access-frjdm\") pod \"nova-metadata-0\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") " pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.797304 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vl6v5\" (UniqueName: \"kubernetes.io/projected/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-kube-api-access-vl6v5\") pod \"nova-api-0\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") " pod="openstack/nova-api-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.869842 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 08:12:22 crc kubenswrapper[4861]: I0129 08:12:22.904541 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.134760 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="534f40f9-4c1b-4795-a0ee-8ae9eaefa81e" path="/var/lib/kubelet/pods/534f40f9-4c1b-4795-a0ee-8ae9eaefa81e/volumes" Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.135789 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59b2d72c-2acc-43b1-884d-3c5426f9e83e" path="/var/lib/kubelet/pods/59b2d72c-2acc-43b1-884d-3c5426f9e83e/volumes" Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.306487 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.412258 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ff9f1017-77ad-4b64-ba6f-6cb3f5472029","Type":"ContainerStarted","Data":"d619f0cd406e69079e2e6af45004d7371039d5e0fca42129040b0513dc130538"} Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.417456 4861 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.417456 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7"
Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.418214 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d96fd4d9-s6dl7" event={"ID":"347f68d9-759b-42c9-b01c-1bb2b5eccdd2","Type":"ContainerDied","Data":"31aedd834e4d2854a232b29afb2ecdff0f317b18d024b2cd2991bdaac65c2726"}
Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.418250 4861 scope.go:117] "RemoveContainer" containerID="4a9bd686ddf0622bd3fe9dc2a8179ef32233f197ea64acdb6017f62200484eb5"
Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.447455 4861 scope.go:117] "RemoveContainer" containerID="2db902c897091d5709551040dd8dea66e64fb50cb5561a34af724893f05e7e1b"
Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.448676 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:23 crc kubenswrapper[4861]: W0129 08:12:23.457420 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00972d44_1efa_46f6_9a32_84d3af5919d8.slice/crio-a5b06f07edf5e7eaec5fb6c608947128d7d03d1a79749041ecf580275f77c539 WatchSource:0}: Error finding container a5b06f07edf5e7eaec5fb6c608947128d7d03d1a79749041ecf580275f77c539: Status 404 returned error can't find the container with id a5b06f07edf5e7eaec5fb6c608947128d7d03d1a79749041ecf580275f77c539
Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.459862 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59d96fd4d9-s6dl7"]
Jan 29 08:12:23 crc kubenswrapper[4861]: I0129 08:12:23.474626 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59d96fd4d9-s6dl7"]
Jan 29 08:12:24 crc kubenswrapper[4861]: I0129 08:12:24.433307 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"00972d44-1efa-46f6-9a32-84d3af5919d8","Type":"ContainerStarted","Data":"78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844"}
Jan 29 08:12:24 crc kubenswrapper[4861]: I0129 08:12:24.433605 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"00972d44-1efa-46f6-9a32-84d3af5919d8","Type":"ContainerStarted","Data":"911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701"}
Jan 29 08:12:24 crc kubenswrapper[4861]: I0129 08:12:24.433617 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"00972d44-1efa-46f6-9a32-84d3af5919d8","Type":"ContainerStarted","Data":"a5b06f07edf5e7eaec5fb6c608947128d7d03d1a79749041ecf580275f77c539"}
Jan 29 08:12:24 crc kubenswrapper[4861]: I0129 08:12:24.438777 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ff9f1017-77ad-4b64-ba6f-6cb3f5472029","Type":"ContainerStarted","Data":"62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb"}
Jan 29 08:12:24 crc kubenswrapper[4861]: I0129 08:12:24.438822 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ff9f1017-77ad-4b64-ba6f-6cb3f5472029","Type":"ContainerStarted","Data":"58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2"}
Jan 29 08:12:24 crc kubenswrapper[4861]: I0129 08:12:24.475955 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.475931966 podStartE2EDuration="2.475931966s" podCreationTimestamp="2026-01-29 08:12:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:24.460333476 +0000 UTC m=+5836.131828053" watchObservedRunningTime="2026-01-29 08:12:24.475931966 +0000 UTC m=+5836.147426543"
Jan 29 08:12:24 crc kubenswrapper[4861]: I0129 08:12:24.496701 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.496674752 podStartE2EDuration="2.496674752s" podCreationTimestamp="2026-01-29 08:12:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:24.484856761 +0000 UTC m=+5836.156351328" watchObservedRunningTime="2026-01-29 08:12:24.496674752 +0000 UTC m=+5836.168169329"
Jan 29 08:12:25 crc kubenswrapper[4861]: I0129 08:12:25.130803 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="347f68d9-759b-42c9-b01c-1bb2b5eccdd2" path="/var/lib/kubelet/pods/347f68d9-759b-42c9-b01c-1bb2b5eccdd2/volumes"
Jan 29 08:12:27 crc kubenswrapper[4861]: I0129 08:12:27.721372 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:27 crc kubenswrapper[4861]: I0129 08:12:27.793131 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:27 crc kubenswrapper[4861]: I0129 08:12:27.870580 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 29 08:12:27 crc kubenswrapper[4861]: I0129 08:12:27.871208 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 29 08:12:28 crc kubenswrapper[4861]: I0129 08:12:28.511791 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Jan 29 08:12:29 crc kubenswrapper[4861]: I0129 08:12:29.124454 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa"
Jan 29 08:12:29 crc kubenswrapper[4861]: E0129 08:12:29.124948 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:12:29 crc kubenswrapper[4861]: I0129 08:12:29.793937 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.274425 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-xd7m7"]
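[annotation] The probe entries above and the "Probe failed ... Client.Timeout exceeded while awaiting headers" lines a few seconds later show how these pods transition: startup probes against https://10.217.1.93:8775/ and http://10.217.1.94:8774/ time out until the services begin answering, and only then do readiness probes flip the pods to ready. A hedged Go sketch of one such HTTP probe; the URL and 1s timeout are illustrative, and real kubelet probes are configured per container spec:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// probeOnce performs one HTTP GET with a hard client timeout, roughly the
// failure mode behind "Client.Timeout exceeded while awaiting headers".
func probeOnce(url string, timeout time.Duration) error {
	client := &http.Client{
		Timeout: timeout,
		Transport: &http.Transport{
			// Probing a service that presents an internal/self-signed cert;
			// verification is skipped here purely for the sketch.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. timeout while awaiting headers -> probeResult="failure"
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy: HTTP %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probeOnce("https://10.217.1.93:8775/", 1*time.Second); err != nil {
		fmt.Println("Probe failed:", err)
	} else {
		fmt.Println("ready")
	}
}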
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.276362 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.287446 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-xd7m7"]
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.288469 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.288539 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.360518 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-config-data\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.360613 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-scripts\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.360659 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qf57b\" (UniqueName: \"kubernetes.io/projected/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-kube-api-access-qf57b\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.360683 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.463176 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qf57b\" (UniqueName: \"kubernetes.io/projected/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-kube-api-access-qf57b\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.463243 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.463495 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-config-data\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.463665 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-scripts\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.472180 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-config-data\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.475556 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.476343 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-scripts\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
Jan 29 08:12:30 crc kubenswrapper[4861]: I0129 08:12:30.494095 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qf57b\" (UniqueName: \"kubernetes.io/projected/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-kube-api-access-qf57b\") pod \"nova-cell1-cell-mapping-xd7m7\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " pod="openstack/nova-cell1-cell-mapping-xd7m7"
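[annotation] Every entry in this log shares one shape: a syslog timestamp, the host (crc), kubenswrapper[pid], a klog header (I/W/E severity + MMDD HH:MM:SS.micros + pid + file:line]), then the message. A short Go sketch that splits those fields out for ad-hoc analysis; the regex is an assumption fitted to the lines in this file, not an official format specification:

package main

import (
	"fmt"
	"regexp"
)

// klogLine matches lines like:
//   Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.615652 4861 pod_container_deletor.go:80] "..."
var klogLine = regexp.MustCompile(
	`^(\w{3} \d+ [\d:]+) (\S+) kubenswrapper\[(\d+)\]: ([IWE])(\d{4} [\d:.]+)\s+\d+ (\S+)\] (.*)$`)

func main() {
	line := `Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.615652 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0e6426a"`
	m := klogLine.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Println("time:    ", m[1])
	fmt.Println("node:    ", m[2])
	fmt.Println("severity:", m[4]) // I=info, W=warning, E=error
	fmt.Println("source:  ", m[6])
	fmt.Println("message: ", m[7])
}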
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-xd7m7" Jan 29 08:12:31 crc kubenswrapper[4861]: I0129 08:12:31.134343 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-xd7m7"] Jan 29 08:12:31 crc kubenswrapper[4861]: W0129 08:12:31.138898 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd5cd313_b722_4d0c_a9e7_2c1122c963ab.slice/crio-b0e6426a78978992409bda7cbe623a4e300594ea1891908233be5510646fc821 WatchSource:0}: Error finding container b0e6426a78978992409bda7cbe623a4e300594ea1891908233be5510646fc821: Status 404 returned error can't find the container with id b0e6426a78978992409bda7cbe623a4e300594ea1891908233be5510646fc821 Jan 29 08:12:31 crc kubenswrapper[4861]: I0129 08:12:31.531411 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-xd7m7" event={"ID":"bd5cd313-b722-4d0c-a9e7-2c1122c963ab","Type":"ContainerStarted","Data":"45a9cde1b6b107e4b902f53854d02c94608ec362df26d428dd61902d58efe914"} Jan 29 08:12:31 crc kubenswrapper[4861]: I0129 08:12:31.531874 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-xd7m7" event={"ID":"bd5cd313-b722-4d0c-a9e7-2c1122c963ab","Type":"ContainerStarted","Data":"b0e6426a78978992409bda7cbe623a4e300594ea1891908233be5510646fc821"} Jan 29 08:12:31 crc kubenswrapper[4861]: I0129 08:12:31.560699 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-xd7m7" podStartSLOduration=1.560670215 podStartE2EDuration="1.560670215s" podCreationTimestamp="2026-01-29 08:12:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:31.552019848 +0000 UTC m=+5843.223514455" watchObservedRunningTime="2026-01-29 08:12:31.560670215 +0000 UTC m=+5843.232164792" Jan 29 08:12:32 crc kubenswrapper[4861]: I0129 08:12:32.870921 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 08:12:32 crc kubenswrapper[4861]: I0129 08:12:32.870982 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 08:12:32 crc kubenswrapper[4861]: I0129 08:12:32.905334 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 08:12:32 crc kubenswrapper[4861]: I0129 08:12:32.905406 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 08:12:33 crc kubenswrapper[4861]: I0129 08:12:33.883245 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.93:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 08:12:33 crc kubenswrapper[4861]: I0129 08:12:33.883284 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.93:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 08:12:33 crc kubenswrapper[4861]: I0129 08:12:33.987417 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" 
podUID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.94:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 08:12:33 crc kubenswrapper[4861]: I0129 08:12:33.987435 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.94:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 08:12:36 crc kubenswrapper[4861]: I0129 08:12:36.588919 4861 generic.go:334] "Generic (PLEG): container finished" podID="bd5cd313-b722-4d0c-a9e7-2c1122c963ab" containerID="45a9cde1b6b107e4b902f53854d02c94608ec362df26d428dd61902d58efe914" exitCode=0 Jan 29 08:12:36 crc kubenswrapper[4861]: I0129 08:12:36.589146 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-xd7m7" event={"ID":"bd5cd313-b722-4d0c-a9e7-2c1122c963ab","Type":"ContainerDied","Data":"45a9cde1b6b107e4b902f53854d02c94608ec362df26d428dd61902d58efe914"} Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.029290 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-xd7m7" Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.134382 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-scripts\") pod \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.134504 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-config-data\") pod \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.134618 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qf57b\" (UniqueName: \"kubernetes.io/projected/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-kube-api-access-qf57b\") pod \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.134785 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-combined-ca-bundle\") pod \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\" (UID: \"bd5cd313-b722-4d0c-a9e7-2c1122c963ab\") " Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.139851 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-scripts" (OuterVolumeSpecName: "scripts") pod "bd5cd313-b722-4d0c-a9e7-2c1122c963ab" (UID: "bd5cd313-b722-4d0c-a9e7-2c1122c963ab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.141256 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-kube-api-access-qf57b" (OuterVolumeSpecName: "kube-api-access-qf57b") pod "bd5cd313-b722-4d0c-a9e7-2c1122c963ab" (UID: "bd5cd313-b722-4d0c-a9e7-2c1122c963ab"). 
InnerVolumeSpecName "kube-api-access-qf57b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.179486 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd5cd313-b722-4d0c-a9e7-2c1122c963ab" (UID: "bd5cd313-b722-4d0c-a9e7-2c1122c963ab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.182155 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-config-data" (OuterVolumeSpecName: "config-data") pod "bd5cd313-b722-4d0c-a9e7-2c1122c963ab" (UID: "bd5cd313-b722-4d0c-a9e7-2c1122c963ab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.238811 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.238855 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qf57b\" (UniqueName: \"kubernetes.io/projected/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-kube-api-access-qf57b\") on node \"crc\" DevicePath \"\"" Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.238878 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.238897 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd5cd313-b722-4d0c-a9e7-2c1122c963ab-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.615607 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-xd7m7" event={"ID":"bd5cd313-b722-4d0c-a9e7-2c1122c963ab","Type":"ContainerDied","Data":"b0e6426a78978992409bda7cbe623a4e300594ea1891908233be5510646fc821"} Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.615652 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0e6426a78978992409bda7cbe623a4e300594ea1891908233be5510646fc821" Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.615686 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-xd7m7" Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.815858 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.816153 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerName="nova-api-log" containerID="cri-o://58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2" gracePeriod=30 Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.816273 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerName="nova-api-api" containerID="cri-o://62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb" gracePeriod=30 Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.848995 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.849624 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerName="nova-metadata-log" containerID="cri-o://911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701" gracePeriod=30 Jan 29 08:12:38 crc kubenswrapper[4861]: I0129 08:12:38.849791 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerName="nova-metadata-metadata" containerID="cri-o://78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844" gracePeriod=30 Jan 29 08:12:39 crc kubenswrapper[4861]: I0129 08:12:39.626147 4861 generic.go:334] "Generic (PLEG): container finished" podID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerID="911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701" exitCode=143 Jan 29 08:12:39 crc kubenswrapper[4861]: I0129 08:12:39.626226 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"00972d44-1efa-46f6-9a32-84d3af5919d8","Type":"ContainerDied","Data":"911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701"} Jan 29 08:12:39 crc kubenswrapper[4861]: I0129 08:12:39.629336 4861 generic.go:334] "Generic (PLEG): container finished" podID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerID="58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2" exitCode=143 Jan 29 08:12:39 crc kubenswrapper[4861]: I0129 08:12:39.629379 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ff9f1017-77ad-4b64-ba6f-6cb3f5472029","Type":"ContainerDied","Data":"58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2"} Jan 29 08:12:44 crc kubenswrapper[4861]: I0129 08:12:44.117914 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:12:44 crc kubenswrapper[4861]: E0129 08:12:44.118976 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 
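[Editor's note: the "back-off 5m0s" in the CrashLoopBackOff line above is the restart back-off saturating at its cap. The sketch below shows the arithmetic under the kubelet's documented defaults (10s base, doubling, 5m cap); it is illustrative, not the kubelet's actual back-off implementation.]

// backoff_sketch.go — restart back-off doubling to a 5m cap.
package main

import (
	"fmt"
	"time"
)

// backoffDelay returns the wait before restart attempt n (0-based):
// 10s, 20s, 40s, ... until it saturates at maxDelay.
func backoffDelay(n int) time.Duration {
	const (
		base     = 10 * time.Second
		maxDelay = 5 * time.Minute
	)
	d := base
	for i := 0; i < n; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for n := 0; n < 7; n++ {
		fmt.Printf("restart %d: wait %v\n", n, backoffDelay(n)) // from restart 5 on: 5m0s
	}
}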
Jan 29 08:12:50 crc kubenswrapper[4861]: I0129 08:12:50.746572 4861 generic.go:334] "Generic (PLEG): container finished" podID="65159619-fe95-4ca3-903b-45f7923ff326" containerID="00f18966cbbdffe9ca54744df540b0b01674a67555d4f476828e73ac3926c204" exitCode=137
Jan 29 08:12:50 crc kubenswrapper[4861]: I0129 08:12:50.746756 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"65159619-fe95-4ca3-903b-45f7923ff326","Type":"ContainerDied","Data":"00f18966cbbdffe9ca54744df540b0b01674a67555d4f476828e73ac3926c204"}
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.120527 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.252561 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-combined-ca-bundle\") pod \"65159619-fe95-4ca3-903b-45f7923ff326\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") "
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.252705 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2fhn\" (UniqueName: \"kubernetes.io/projected/65159619-fe95-4ca3-903b-45f7923ff326-kube-api-access-t2fhn\") pod \"65159619-fe95-4ca3-903b-45f7923ff326\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") "
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.252802 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-config-data\") pod \"65159619-fe95-4ca3-903b-45f7923ff326\" (UID: \"65159619-fe95-4ca3-903b-45f7923ff326\") "
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.259159 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65159619-fe95-4ca3-903b-45f7923ff326-kube-api-access-t2fhn" (OuterVolumeSpecName: "kube-api-access-t2fhn") pod "65159619-fe95-4ca3-903b-45f7923ff326" (UID: "65159619-fe95-4ca3-903b-45f7923ff326"). InnerVolumeSpecName "kube-api-access-t2fhn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.280221 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-config-data" (OuterVolumeSpecName: "config-data") pod "65159619-fe95-4ca3-903b-45f7923ff326" (UID: "65159619-fe95-4ca3-903b-45f7923ff326"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.312394 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "65159619-fe95-4ca3-903b-45f7923ff326" (UID: "65159619-fe95-4ca3-903b-45f7923ff326"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.355806 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.355864 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65159619-fe95-4ca3-903b-45f7923ff326-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.355888 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2fhn\" (UniqueName: \"kubernetes.io/projected/65159619-fe95-4ca3-903b-45f7923ff326-kube-api-access-t2fhn\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.767149 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"65159619-fe95-4ca3-903b-45f7923ff326","Type":"ContainerDied","Data":"b23e0f43d6c55fbbbd0b367065525a5576f21fd51f27999997bfd3bcf4fe99ca"}
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.767183 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.767882 4861 scope.go:117] "RemoveContainer" containerID="00f18966cbbdffe9ca54744df540b0b01674a67555d4f476828e73ac3926c204"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.822951 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.853612 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.865724 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 08:12:51 crc kubenswrapper[4861]: E0129 08:12:51.867132 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65159619-fe95-4ca3-903b-45f7923ff326" containerName="nova-scheduler-scheduler"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.867243 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="65159619-fe95-4ca3-903b-45f7923ff326" containerName="nova-scheduler-scheduler"
Jan 29 08:12:51 crc kubenswrapper[4861]: E0129 08:12:51.867425 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd5cd313-b722-4d0c-a9e7-2c1122c963ab" containerName="nova-manage"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.867752 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd5cd313-b722-4d0c-a9e7-2c1122c963ab" containerName="nova-manage"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.868479 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="65159619-fe95-4ca3-903b-45f7923ff326" containerName="nova-scheduler-scheduler"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.868620 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd5cd313-b722-4d0c-a9e7-2c1122c963ab" containerName="nova-manage"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.870931 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.872872 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.875769 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.972060 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwnhq\" (UniqueName: \"kubernetes.io/projected/6775a96e-88ab-471c-9abb-9a981572d243-kube-api-access-jwnhq\") pod \"nova-scheduler-0\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " pod="openstack/nova-scheduler-0"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.972163 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-config-data\") pod \"nova-scheduler-0\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " pod="openstack/nova-scheduler-0"
Jan 29 08:12:51 crc kubenswrapper[4861]: I0129 08:12:51.972411 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " pod="openstack/nova-scheduler-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.074485 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " pod="openstack/nova-scheduler-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.074616 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwnhq\" (UniqueName: \"kubernetes.io/projected/6775a96e-88ab-471c-9abb-9a981572d243-kube-api-access-jwnhq\") pod \"nova-scheduler-0\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " pod="openstack/nova-scheduler-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.074643 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-config-data\") pod \"nova-scheduler-0\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " pod="openstack/nova-scheduler-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.079767 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " pod="openstack/nova-scheduler-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.089755 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-config-data\") pod \"nova-scheduler-0\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " pod="openstack/nova-scheduler-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.107953 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwnhq\" (UniqueName: \"kubernetes.io/projected/6775a96e-88ab-471c-9abb-9a981572d243-kube-api-access-jwnhq\") pod \"nova-scheduler-0\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " pod="openstack/nova-scheduler-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.205050 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.729619 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.739511 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.745801 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.802692 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6775a96e-88ab-471c-9abb-9a981572d243","Type":"ContainerStarted","Data":"599739d502bb78a0af0a216d714e3423bc5dc3168e733b69f5b56aef8e626963"}
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.806730 4861 generic.go:334] "Generic (PLEG): container finished" podID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerID="62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb" exitCode=0
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.806778 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ff9f1017-77ad-4b64-ba6f-6cb3f5472029","Type":"ContainerDied","Data":"62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb"}
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.806798 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ff9f1017-77ad-4b64-ba6f-6cb3f5472029","Type":"ContainerDied","Data":"d619f0cd406e69079e2e6af45004d7371039d5e0fca42129040b0513dc130538"}
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.806816 4861 scope.go:117] "RemoveContainer" containerID="62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.806936 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.820801 4861 generic.go:334] "Generic (PLEG): container finished" podID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerID="78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844" exitCode=0
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.820854 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"00972d44-1efa-46f6-9a32-84d3af5919d8","Type":"ContainerDied","Data":"78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844"}
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.820889 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"00972d44-1efa-46f6-9a32-84d3af5919d8","Type":"ContainerDied","Data":"a5b06f07edf5e7eaec5fb6c608947128d7d03d1a79749041ecf580275f77c539"}
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.820972 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.842208 4861 scope.go:117] "RemoveContainer" containerID="58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.867915 4861 scope.go:117] "RemoveContainer" containerID="62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb"
Jan 29 08:12:52 crc kubenswrapper[4861]: E0129 08:12:52.868683 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb\": container with ID starting with 62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb not found: ID does not exist" containerID="62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.868734 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb"} err="failed to get container status \"62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb\": rpc error: code = NotFound desc = could not find container \"62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb\": container with ID starting with 62ffe34980cca58a69b5a6c5875ee1f6490028ca8eba9692e2b6fea25fec5dbb not found: ID does not exist"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.868761 4861 scope.go:117] "RemoveContainer" containerID="58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2"
Jan 29 08:12:52 crc kubenswrapper[4861]: E0129 08:12:52.869173 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2\": container with ID starting with 58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2 not found: ID does not exist" containerID="58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.869215 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2"} err="failed to get container status \"58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2\": rpc error: code = NotFound desc = could not find container \"58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2\": container with ID starting with 58481c8aa19590822790dded5280a8e102e47ae303feb9fbe909ee4bf43872d2 not found: ID does not exist"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.869239 4861 scope.go:117] "RemoveContainer" containerID="78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.888032 4861 scope.go:117] "RemoveContainer" containerID="911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.894091 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-combined-ca-bundle\") pod \"00972d44-1efa-46f6-9a32-84d3af5919d8\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") "
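[Editor's note: the NotFound errors above are benign: the container was already removed, so the kubelet logs the error and treats the container as gone. The sketch below shows the tolerated-error pattern; removeContainer is a hypothetical stand-in for the CRI call, not the kubelet's actual code.]

// notfound_sketch.go — tolerate gRPC NotFound when deleting an already-removed container.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer is an assumed runtime call; here it always reports NotFound.
func removeContainer(id string) error {
	return status.Error(codes.NotFound, fmt.Sprintf("could not find container %q", id))
}

func cleanup(id string) {
	if err := removeContainer(id); err != nil {
		if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
			// Benign: the desired end state (container absent) already holds.
			fmt.Printf("container %s already gone, treating as removed: %v\n", id, err)
			return
		}
		fmt.Printf("DeleteContainer returned error: %v\n", err)
	}
}

func main() {
	cleanup("62ffe34980cc") // shortened ID from the log, for illustration
}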
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.894143 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-config-data\") pod \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") "
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.894181 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-nova-metadata-tls-certs\") pod \"00972d44-1efa-46f6-9a32-84d3af5919d8\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") "
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.894256 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-config-data\") pod \"00972d44-1efa-46f6-9a32-84d3af5919d8\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") "
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.894297 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-combined-ca-bundle\") pod \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") "
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.894942 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frjdm\" (UniqueName: \"kubernetes.io/projected/00972d44-1efa-46f6-9a32-84d3af5919d8-kube-api-access-frjdm\") pod \"00972d44-1efa-46f6-9a32-84d3af5919d8\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") "
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.894987 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vl6v5\" (UniqueName: \"kubernetes.io/projected/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-kube-api-access-vl6v5\") pod \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") "
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.895083 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-logs\") pod \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\" (UID: \"ff9f1017-77ad-4b64-ba6f-6cb3f5472029\") "
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.895140 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00972d44-1efa-46f6-9a32-84d3af5919d8-logs\") pod \"00972d44-1efa-46f6-9a32-84d3af5919d8\" (UID: \"00972d44-1efa-46f6-9a32-84d3af5919d8\") "
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.896024 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00972d44-1efa-46f6-9a32-84d3af5919d8-logs" (OuterVolumeSpecName: "logs") pod "00972d44-1efa-46f6-9a32-84d3af5919d8" (UID: "00972d44-1efa-46f6-9a32-84d3af5919d8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.896119 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-logs" (OuterVolumeSpecName: "logs") pod "ff9f1017-77ad-4b64-ba6f-6cb3f5472029" (UID: "ff9f1017-77ad-4b64-ba6f-6cb3f5472029"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.899021 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-kube-api-access-vl6v5" (OuterVolumeSpecName: "kube-api-access-vl6v5") pod "ff9f1017-77ad-4b64-ba6f-6cb3f5472029" (UID: "ff9f1017-77ad-4b64-ba6f-6cb3f5472029"). InnerVolumeSpecName "kube-api-access-vl6v5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.910919 4861 scope.go:117] "RemoveContainer" containerID="78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844"
Jan 29 08:12:52 crc kubenswrapper[4861]: E0129 08:12:52.912864 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844\": container with ID starting with 78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844 not found: ID does not exist" containerID="78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.912936 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844"} err="failed to get container status \"78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844\": rpc error: code = NotFound desc = could not find container \"78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844\": container with ID starting with 78ce8f4780ecceecfa17b16729df4bc4b6b51cdd1a9fb8899e41e7a9a5b0e844 not found: ID does not exist"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.912979 4861 scope.go:117] "RemoveContainer" containerID="911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.913153 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00972d44-1efa-46f6-9a32-84d3af5919d8-kube-api-access-frjdm" (OuterVolumeSpecName: "kube-api-access-frjdm") pod "00972d44-1efa-46f6-9a32-84d3af5919d8" (UID: "00972d44-1efa-46f6-9a32-84d3af5919d8"). InnerVolumeSpecName "kube-api-access-frjdm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:12:52 crc kubenswrapper[4861]: E0129 08:12:52.913389 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701\": container with ID starting with 911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701 not found: ID does not exist" containerID="911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.913432 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701"} err="failed to get container status \"911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701\": rpc error: code = NotFound desc = could not find container \"911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701\": container with ID starting with 911f5ea21ff69876f7645f94977d1c6534be3186776a81259df1d4643908f701 not found: ID does not exist"
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.925644 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-config-data" (OuterVolumeSpecName: "config-data") pod "00972d44-1efa-46f6-9a32-84d3af5919d8" (UID: "00972d44-1efa-46f6-9a32-84d3af5919d8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.925846 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff9f1017-77ad-4b64-ba6f-6cb3f5472029" (UID: "ff9f1017-77ad-4b64-ba6f-6cb3f5472029"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.930884 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-config-data" (OuterVolumeSpecName: "config-data") pod "ff9f1017-77ad-4b64-ba6f-6cb3f5472029" (UID: "ff9f1017-77ad-4b64-ba6f-6cb3f5472029"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.937602 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00972d44-1efa-46f6-9a32-84d3af5919d8" (UID: "00972d44-1efa-46f6-9a32-84d3af5919d8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.962187 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "00972d44-1efa-46f6-9a32-84d3af5919d8" (UID: "00972d44-1efa-46f6-9a32-84d3af5919d8"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.997688 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.997744 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.997775 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frjdm\" (UniqueName: \"kubernetes.io/projected/00972d44-1efa-46f6-9a32-84d3af5919d8-kube-api-access-frjdm\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.997801 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vl6v5\" (UniqueName: \"kubernetes.io/projected/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-kube-api-access-vl6v5\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.997821 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-logs\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.997842 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00972d44-1efa-46f6-9a32-84d3af5919d8-logs\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.997868 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.997889 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff9f1017-77ad-4b64-ba6f-6cb3f5472029-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:52 crc kubenswrapper[4861]: I0129 08:12:52.997911 4861 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/00972d44-1efa-46f6-9a32-84d3af5919d8-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.129520 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65159619-fe95-4ca3-903b-45f7923ff326" path="/var/lib/kubelet/pods/65159619-fe95-4ca3-903b-45f7923ff326/volumes"
Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.239387 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.255184 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.269160 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.277632 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.293139 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
removing container" podUID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerName="nova-metadata-log" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.293779 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerName="nova-metadata-log" Jan 29 08:12:53 crc kubenswrapper[4861]: E0129 08:12:53.293800 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerName="nova-api-api" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.293808 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerName="nova-api-api" Jan 29 08:12:53 crc kubenswrapper[4861]: E0129 08:12:53.293823 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerName="nova-metadata-metadata" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.293829 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerName="nova-metadata-metadata" Jan 29 08:12:53 crc kubenswrapper[4861]: E0129 08:12:53.293840 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerName="nova-api-log" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.293848 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerName="nova-api-log" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.294090 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerName="nova-api-api" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.294104 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerName="nova-metadata-log" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.294124 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" containerName="nova-api-log" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.294140 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="00972d44-1efa-46f6-9a32-84d3af5919d8" containerName="nova-metadata-metadata" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.295510 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.301296 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.308107 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.323801 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.326187 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.329403 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.329426 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.344310 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.407936 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rffwd\" (UniqueName: \"kubernetes.io/projected/41709de8-67d8-4cef-867c-fc819670b416-kube-api-access-rffwd\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.408304 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-config-data\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.408502 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41709de8-67d8-4cef-867c-fc819670b416-logs\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.408654 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.510687 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-logs\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.510804 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-config-data\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.510862 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.510934 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41709de8-67d8-4cef-867c-fc819670b416-logs\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.510995 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.511125 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.511243 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlxqq\" (UniqueName: \"kubernetes.io/projected/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-kube-api-access-xlxqq\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.511329 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rffwd\" (UniqueName: \"kubernetes.io/projected/41709de8-67d8-4cef-867c-fc819670b416-kube-api-access-rffwd\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.511529 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-config-data\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.511877 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41709de8-67d8-4cef-867c-fc819670b416-logs\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.517491 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-config-data\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.529759 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.545809 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rffwd\" (UniqueName: \"kubernetes.io/projected/41709de8-67d8-4cef-867c-fc819670b416-kube-api-access-rffwd\") pod \"nova-api-0\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.613398 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc 
kubenswrapper[4861]: I0129 08:12:53.613554 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.613644 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlxqq\" (UniqueName: \"kubernetes.io/projected/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-kube-api-access-xlxqq\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.614123 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-config-data\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.614384 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-logs\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.614942 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-logs\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.617568 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-config-data\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.618065 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.632914 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.637654 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.644222 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlxqq\" (UniqueName: \"kubernetes.io/projected/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-kube-api-access-xlxqq\") pod \"nova-metadata-0\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " pod="openstack/nova-metadata-0" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.857710 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6775a96e-88ab-471c-9abb-9a981572d243","Type":"ContainerStarted","Data":"4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501"} Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.879878 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.879860414 podStartE2EDuration="2.879860414s" podCreationTimestamp="2026-01-29 08:12:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:53.877349558 +0000 UTC m=+5865.548844125" watchObservedRunningTime="2026-01-29 08:12:53.879860414 +0000 UTC m=+5865.551354971" Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.939942 4861 util.go:30] "No sandbox for pod can be found. 
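[Editor's note: the "Observed pod startup duration" line above is simple wall-clock arithmetic: podStartSLOduration is the gap from podCreationTimestamp to watchObservedRunningTime, with image-pull time excluded (both pull timestamps are the epoch sentinel 0001-01-01 here, i.e. no pull). A quick check of the nova-scheduler-0 numbers, timestamps copied from the line above without the monotonic m=+... suffix:]

// latency_sketch.go — verify podStartSLOduration from the logged timestamps.
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-29 08:12:51 +0000 UTC")
	observed, _ := time.Parse(layout, "2026-01-29 08:12:53.879860414 +0000 UTC")
	fmt.Println(observed.Sub(created)) // 2.879860414s, matching podStartSLOduration
}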
Jan 29 08:12:53 crc kubenswrapper[4861]: I0129 08:12:53.939942 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 29 08:12:54 crc kubenswrapper[4861]: I0129 08:12:54.146242 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 29 08:12:54 crc kubenswrapper[4861]: W0129 08:12:54.148672 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod41709de8_67d8_4cef_867c_fc819670b416.slice/crio-5a7ace8ca5bfe563ef65a822134fec3fe11c30d50286bf2692505b6c6bbca7f5 WatchSource:0}: Error finding container 5a7ace8ca5bfe563ef65a822134fec3fe11c30d50286bf2692505b6c6bbca7f5: Status 404 returned error can't find the container with id 5a7ace8ca5bfe563ef65a822134fec3fe11c30d50286bf2692505b6c6bbca7f5
Jan 29 08:12:54 crc kubenswrapper[4861]: I0129 08:12:54.432685 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 08:12:54 crc kubenswrapper[4861]: I0129 08:12:54.871792 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"41709de8-67d8-4cef-867c-fc819670b416","Type":"ContainerStarted","Data":"acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f"}
Jan 29 08:12:54 crc kubenswrapper[4861]: I0129 08:12:54.871976 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"41709de8-67d8-4cef-867c-fc819670b416","Type":"ContainerStarted","Data":"022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db"}
Jan 29 08:12:54 crc kubenswrapper[4861]: I0129 08:12:54.871998 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"41709de8-67d8-4cef-867c-fc819670b416","Type":"ContainerStarted","Data":"5a7ace8ca5bfe563ef65a822134fec3fe11c30d50286bf2692505b6c6bbca7f5"}
Jan 29 08:12:54 crc kubenswrapper[4861]: I0129 08:12:54.874532 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310","Type":"ContainerStarted","Data":"e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70"}
Jan 29 08:12:54 crc kubenswrapper[4861]: I0129 08:12:54.874581 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310","Type":"ContainerStarted","Data":"a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028"}
Jan 29 08:12:54 crc kubenswrapper[4861]: I0129 08:12:54.874595 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310","Type":"ContainerStarted","Data":"87390450b98be59a9669e95a98f099bec586bebc21fbd6105907cbe28a915fb6"}
Jan 29 08:12:54 crc kubenswrapper[4861]: I0129 08:12:54.904493 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=1.90446843 podStartE2EDuration="1.90446843s" podCreationTimestamp="2026-01-29 08:12:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:54.899201221 +0000 UTC m=+5866.570695808" watchObservedRunningTime="2026-01-29 08:12:54.90446843 +0000 UTC m=+5866.575963007"
Jan 29 08:12:54 crc kubenswrapper[4861]: I0129 08:12:54.931591 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.931563672 podStartE2EDuration="1.931563672s" podCreationTimestamp="2026-01-29 08:12:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:12:54.925325748 +0000 UTC m=+5866.596820335" watchObservedRunningTime="2026-01-29 08:12:54.931563672 +0000 UTC m=+5866.603058239"
Jan 29 08:12:55 crc kubenswrapper[4861]: I0129 08:12:55.130662 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00972d44-1efa-46f6-9a32-84d3af5919d8" path="/var/lib/kubelet/pods/00972d44-1efa-46f6-9a32-84d3af5919d8/volumes"
Jan 29 08:12:55 crc kubenswrapper[4861]: I0129 08:12:55.132065 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff9f1017-77ad-4b64-ba6f-6cb3f5472029" path="/var/lib/kubelet/pods/ff9f1017-77ad-4b64-ba6f-6cb3f5472029/volumes"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.116646 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa"
Jan 29 08:12:56 crc kubenswrapper[4861]: E0129 08:12:56.116913 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.368612 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rjlh4"]
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.372271 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rjlh4"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.378391 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rjlh4"]
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.470925 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5nsg\" (UniqueName: \"kubernetes.io/projected/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-kube-api-access-g5nsg\") pod \"redhat-marketplace-rjlh4\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " pod="openshift-marketplace/redhat-marketplace-rjlh4"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.471388 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-catalog-content\") pod \"redhat-marketplace-rjlh4\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " pod="openshift-marketplace/redhat-marketplace-rjlh4"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.471423 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-utilities\") pod \"redhat-marketplace-rjlh4\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " pod="openshift-marketplace/redhat-marketplace-rjlh4"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.573625 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5nsg\" (UniqueName: \"kubernetes.io/projected/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-kube-api-access-g5nsg\") pod \"redhat-marketplace-rjlh4\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " pod="openshift-marketplace/redhat-marketplace-rjlh4"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.573719 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-catalog-content\") pod \"redhat-marketplace-rjlh4\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " pod="openshift-marketplace/redhat-marketplace-rjlh4"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.573747 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-utilities\") pod \"redhat-marketplace-rjlh4\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " pod="openshift-marketplace/redhat-marketplace-rjlh4"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.574242 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-utilities\") pod \"redhat-marketplace-rjlh4\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " pod="openshift-marketplace/redhat-marketplace-rjlh4"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.574237 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-catalog-content\") pod \"redhat-marketplace-rjlh4\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " pod="openshift-marketplace/redhat-marketplace-rjlh4"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.597667 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5nsg\" (UniqueName: \"kubernetes.io/projected/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-kube-api-access-g5nsg\") pod \"redhat-marketplace-rjlh4\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " pod="openshift-marketplace/redhat-marketplace-rjlh4"
Jan 29 08:12:56 crc kubenswrapper[4861]: I0129 08:12:56.700811 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rjlh4" Jan 29 08:12:57 crc kubenswrapper[4861]: W0129 08:12:57.202418 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4e4cb75_3ab1_45fe_a6a4_0eb5a47b0a21.slice/crio-acd856f16385991e7eec2b37b39eea283f4b6a972a8a6bdb15a1a61c8e2a061c WatchSource:0}: Error finding container acd856f16385991e7eec2b37b39eea283f4b6a972a8a6bdb15a1a61c8e2a061c: Status 404 returned error can't find the container with id acd856f16385991e7eec2b37b39eea283f4b6a972a8a6bdb15a1a61c8e2a061c Jan 29 08:12:57 crc kubenswrapper[4861]: I0129 08:12:57.203486 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rjlh4"] Jan 29 08:12:57 crc kubenswrapper[4861]: I0129 08:12:57.206354 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 08:12:57 crc kubenswrapper[4861]: I0129 08:12:57.905679 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" containerID="13caa36a86664a642c9d28da2ec42ec36b1a9a76120acf8228ff84c6655bd941" exitCode=0 Jan 29 08:12:57 crc kubenswrapper[4861]: I0129 08:12:57.905744 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rjlh4" event={"ID":"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21","Type":"ContainerDied","Data":"13caa36a86664a642c9d28da2ec42ec36b1a9a76120acf8228ff84c6655bd941"} Jan 29 08:12:57 crc kubenswrapper[4861]: I0129 08:12:57.905928 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rjlh4" event={"ID":"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21","Type":"ContainerStarted","Data":"acd856f16385991e7eec2b37b39eea283f4b6a972a8a6bdb15a1a61c8e2a061c"} Jan 29 08:12:57 crc kubenswrapper[4861]: I0129 08:12:57.908831 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 08:12:58 crc kubenswrapper[4861]: I0129 08:12:58.940908 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 08:12:58 crc kubenswrapper[4861]: I0129 08:12:58.940996 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 08:12:59 crc kubenswrapper[4861]: I0129 08:12:59.932389 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" containerID="f530539587e15ea329ec2e8e26e46cdbbb155408ab2debdadb7d8765d233dfdd" exitCode=0 Jan 29 08:12:59 crc kubenswrapper[4861]: I0129 08:12:59.932451 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rjlh4" event={"ID":"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21","Type":"ContainerDied","Data":"f530539587e15ea329ec2e8e26e46cdbbb155408ab2debdadb7d8765d233dfdd"} Jan 29 08:13:00 crc kubenswrapper[4861]: I0129 08:13:00.945823 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rjlh4" event={"ID":"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21","Type":"ContainerStarted","Data":"c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee"} Jan 29 08:13:00 crc kubenswrapper[4861]: I0129 08:13:00.974689 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rjlh4" podStartSLOduration=2.488377222 podStartE2EDuration="4.974674209s" podCreationTimestamp="2026-01-29 08:12:56 +0000 UTC" 
firstStartedPulling="2026-01-29 08:12:57.908573054 +0000 UTC m=+5869.580067621" lastFinishedPulling="2026-01-29 08:13:00.394870021 +0000 UTC m=+5872.066364608" observedRunningTime="2026-01-29 08:13:00.971031243 +0000 UTC m=+5872.642525810" watchObservedRunningTime="2026-01-29 08:13:00.974674209 +0000 UTC m=+5872.646168766" Jan 29 08:13:02 crc kubenswrapper[4861]: I0129 08:13:02.205398 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 08:13:02 crc kubenswrapper[4861]: I0129 08:13:02.239231 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 08:13:03 crc kubenswrapper[4861]: I0129 08:13:03.005199 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 08:13:03 crc kubenswrapper[4861]: I0129 08:13:03.618577 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 08:13:03 crc kubenswrapper[4861]: I0129 08:13:03.618653 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 08:13:03 crc kubenswrapper[4861]: I0129 08:13:03.942752 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 08:13:03 crc kubenswrapper[4861]: I0129 08:13:03.942841 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 08:13:04 crc kubenswrapper[4861]: I0129 08:13:04.701368 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="41709de8-67d8-4cef-867c-fc819670b416" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.97:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 08:13:04 crc kubenswrapper[4861]: I0129 08:13:04.701309 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="41709de8-67d8-4cef-867c-fc819670b416" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.97:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 08:13:04 crc kubenswrapper[4861]: I0129 08:13:04.963315 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.98:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 08:13:04 crc kubenswrapper[4861]: I0129 08:13:04.963646 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.98:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 08:13:06 crc kubenswrapper[4861]: I0129 08:13:06.704150 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rjlh4" Jan 29 08:13:06 crc kubenswrapper[4861]: I0129 08:13:06.705310 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rjlh4" Jan 29 08:13:06 crc kubenswrapper[4861]: I0129 08:13:06.785449 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rjlh4" Jan 29 
08:13:07 crc kubenswrapper[4861]: I0129 08:13:07.072929 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rjlh4" Jan 29 08:13:07 crc kubenswrapper[4861]: I0129 08:13:07.143048 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rjlh4"] Jan 29 08:13:09 crc kubenswrapper[4861]: I0129 08:13:09.021251 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rjlh4" podUID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" containerName="registry-server" containerID="cri-o://c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee" gracePeriod=2 Jan 29 08:13:09 crc kubenswrapper[4861]: I0129 08:13:09.451305 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rjlh4" Jan 29 08:13:09 crc kubenswrapper[4861]: I0129 08:13:09.577013 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-utilities\") pod \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " Jan 29 08:13:09 crc kubenswrapper[4861]: I0129 08:13:09.577165 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-catalog-content\") pod \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " Jan 29 08:13:09 crc kubenswrapper[4861]: I0129 08:13:09.577475 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5nsg\" (UniqueName: \"kubernetes.io/projected/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-kube-api-access-g5nsg\") pod \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\" (UID: \"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21\") " Jan 29 08:13:09 crc kubenswrapper[4861]: I0129 08:13:09.579001 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-utilities" (OuterVolumeSpecName: "utilities") pod "f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" (UID: "f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:13:09 crc kubenswrapper[4861]: I0129 08:13:09.583151 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-kube-api-access-g5nsg" (OuterVolumeSpecName: "kube-api-access-g5nsg") pod "f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" (UID: "f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21"). InnerVolumeSpecName "kube-api-access-g5nsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:13:09 crc kubenswrapper[4861]: I0129 08:13:09.601775 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" (UID: "f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:13:09 crc kubenswrapper[4861]: I0129 08:13:09.680866 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:09 crc kubenswrapper[4861]: I0129 08:13:09.680923 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:09 crc kubenswrapper[4861]: I0129 08:13:09.680951 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5nsg\" (UniqueName: \"kubernetes.io/projected/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21-kube-api-access-g5nsg\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.043701 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" containerID="c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee" exitCode=0 Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.043771 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rjlh4" Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.043780 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rjlh4" event={"ID":"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21","Type":"ContainerDied","Data":"c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee"} Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.043852 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rjlh4" event={"ID":"f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21","Type":"ContainerDied","Data":"acd856f16385991e7eec2b37b39eea283f4b6a972a8a6bdb15a1a61c8e2a061c"} Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.043900 4861 scope.go:117] "RemoveContainer" containerID="c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee" Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.087947 4861 scope.go:117] "RemoveContainer" containerID="f530539587e15ea329ec2e8e26e46cdbbb155408ab2debdadb7d8765d233dfdd" Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.104624 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rjlh4"] Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.119224 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rjlh4"] Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.125894 4861 scope.go:117] "RemoveContainer" containerID="13caa36a86664a642c9d28da2ec42ec36b1a9a76120acf8228ff84c6655bd941" Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.196619 4861 scope.go:117] "RemoveContainer" containerID="c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee" Jan 29 08:13:10 crc kubenswrapper[4861]: E0129 08:13:10.197509 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee\": container with ID starting with c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee not found: ID does not exist" containerID="c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee" Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.197627 4861 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee"} err="failed to get container status \"c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee\": rpc error: code = NotFound desc = could not find container \"c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee\": container with ID starting with c23a9688160a7bfb77085b02389b12a078e37e43f857d6645f0196d08f6a45ee not found: ID does not exist" Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.197676 4861 scope.go:117] "RemoveContainer" containerID="f530539587e15ea329ec2e8e26e46cdbbb155408ab2debdadb7d8765d233dfdd" Jan 29 08:13:10 crc kubenswrapper[4861]: E0129 08:13:10.198510 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f530539587e15ea329ec2e8e26e46cdbbb155408ab2debdadb7d8765d233dfdd\": container with ID starting with f530539587e15ea329ec2e8e26e46cdbbb155408ab2debdadb7d8765d233dfdd not found: ID does not exist" containerID="f530539587e15ea329ec2e8e26e46cdbbb155408ab2debdadb7d8765d233dfdd" Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.198630 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f530539587e15ea329ec2e8e26e46cdbbb155408ab2debdadb7d8765d233dfdd"} err="failed to get container status \"f530539587e15ea329ec2e8e26e46cdbbb155408ab2debdadb7d8765d233dfdd\": rpc error: code = NotFound desc = could not find container \"f530539587e15ea329ec2e8e26e46cdbbb155408ab2debdadb7d8765d233dfdd\": container with ID starting with f530539587e15ea329ec2e8e26e46cdbbb155408ab2debdadb7d8765d233dfdd not found: ID does not exist" Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.198671 4861 scope.go:117] "RemoveContainer" containerID="13caa36a86664a642c9d28da2ec42ec36b1a9a76120acf8228ff84c6655bd941" Jan 29 08:13:10 crc kubenswrapper[4861]: E0129 08:13:10.199602 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13caa36a86664a642c9d28da2ec42ec36b1a9a76120acf8228ff84c6655bd941\": container with ID starting with 13caa36a86664a642c9d28da2ec42ec36b1a9a76120acf8228ff84c6655bd941 not found: ID does not exist" containerID="13caa36a86664a642c9d28da2ec42ec36b1a9a76120acf8228ff84c6655bd941" Jan 29 08:13:10 crc kubenswrapper[4861]: I0129 08:13:10.199671 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13caa36a86664a642c9d28da2ec42ec36b1a9a76120acf8228ff84c6655bd941"} err="failed to get container status \"13caa36a86664a642c9d28da2ec42ec36b1a9a76120acf8228ff84c6655bd941\": rpc error: code = NotFound desc = could not find container \"13caa36a86664a642c9d28da2ec42ec36b1a9a76120acf8228ff84c6655bd941\": container with ID starting with 13caa36a86664a642c9d28da2ec42ec36b1a9a76120acf8228ff84c6655bd941 not found: ID does not exist" Jan 29 08:13:11 crc kubenswrapper[4861]: I0129 08:13:11.117338 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:13:11 crc kubenswrapper[4861]: E0129 08:13:11.118373 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:13:11 crc kubenswrapper[4861]: I0129 08:13:11.134768 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" path="/var/lib/kubelet/pods/f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21/volumes" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.451018 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lvjhf"] Jan 29 08:13:12 crc kubenswrapper[4861]: E0129 08:13:12.452647 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" containerName="extract-utilities" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.452694 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" containerName="extract-utilities" Jan 29 08:13:12 crc kubenswrapper[4861]: E0129 08:13:12.452735 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" containerName="extract-content" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.452753 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" containerName="extract-content" Jan 29 08:13:12 crc kubenswrapper[4861]: E0129 08:13:12.452780 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" containerName="registry-server" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.452795 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" containerName="registry-server" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.453205 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4e4cb75-3ab1-45fe-a6a4-0eb5a47b0a21" containerName="registry-server" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.456560 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.483315 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lvjhf"] Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.549461 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-utilities\") pod \"certified-operators-lvjhf\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.549516 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tc6jv\" (UniqueName: \"kubernetes.io/projected/1d8fe23c-9df2-4779-86f1-49056e1e790f-kube-api-access-tc6jv\") pod \"certified-operators-lvjhf\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.549632 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-catalog-content\") pod \"certified-operators-lvjhf\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.650931 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-utilities\") pod \"certified-operators-lvjhf\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.650975 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tc6jv\" (UniqueName: \"kubernetes.io/projected/1d8fe23c-9df2-4779-86f1-49056e1e790f-kube-api-access-tc6jv\") pod \"certified-operators-lvjhf\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.651034 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-catalog-content\") pod \"certified-operators-lvjhf\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.651847 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-utilities\") pod \"certified-operators-lvjhf\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.651860 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-catalog-content\") pod \"certified-operators-lvjhf\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.672747 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tc6jv\" (UniqueName: \"kubernetes.io/projected/1d8fe23c-9df2-4779-86f1-49056e1e790f-kube-api-access-tc6jv\") pod \"certified-operators-lvjhf\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:12 crc kubenswrapper[4861]: I0129 08:13:12.779414 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:13 crc kubenswrapper[4861]: I0129 08:13:13.326717 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lvjhf"] Jan 29 08:13:13 crc kubenswrapper[4861]: W0129 08:13:13.330604 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d8fe23c_9df2_4779_86f1_49056e1e790f.slice/crio-0464ce9563a55dfb1389446552ae39fc1489e6def259f83bdd3a876c83cee6d8 WatchSource:0}: Error finding container 0464ce9563a55dfb1389446552ae39fc1489e6def259f83bdd3a876c83cee6d8: Status 404 returned error can't find the container with id 0464ce9563a55dfb1389446552ae39fc1489e6def259f83bdd3a876c83cee6d8 Jan 29 08:13:13 crc kubenswrapper[4861]: I0129 08:13:13.624955 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 08:13:13 crc kubenswrapper[4861]: I0129 08:13:13.626205 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 08:13:13 crc kubenswrapper[4861]: I0129 08:13:13.630117 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 08:13:13 crc kubenswrapper[4861]: I0129 08:13:13.630237 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 08:13:13 crc kubenswrapper[4861]: I0129 08:13:13.947650 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 08:13:13 crc kubenswrapper[4861]: I0129 08:13:13.948927 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 08:13:13 crc kubenswrapper[4861]: I0129 08:13:13.955032 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.091443 4861 generic.go:334] "Generic (PLEG): container finished" podID="1d8fe23c-9df2-4779-86f1-49056e1e790f" containerID="5cb34a6285c565a5eb98bbb9abd62a36dc72b9b12528d470510dbf4f8623371b" exitCode=0 Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.091493 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lvjhf" event={"ID":"1d8fe23c-9df2-4779-86f1-49056e1e790f","Type":"ContainerDied","Data":"5cb34a6285c565a5eb98bbb9abd62a36dc72b9b12528d470510dbf4f8623371b"} Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.091538 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lvjhf" event={"ID":"1d8fe23c-9df2-4779-86f1-49056e1e790f","Type":"ContainerStarted","Data":"0464ce9563a55dfb1389446552ae39fc1489e6def259f83bdd3a876c83cee6d8"} Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.092567 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.099491 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/nova-api-0" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.116787 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.375200 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ddf5b9dd7-m7g85"] Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.376682 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.387649 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ddf5b9dd7-m7g85"] Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.504160 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-nb\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.504540 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-sb\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.504593 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-config\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.504620 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9m95\" (UniqueName: \"kubernetes.io/projected/fb991421-a937-4891-b21e-cdd66b1675a7-kube-api-access-x9m95\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.504664 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-dns-svc\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.607388 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-sb\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.607538 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-config\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 
08:13:14.607621 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9m95\" (UniqueName: \"kubernetes.io/projected/fb991421-a937-4891-b21e-cdd66b1675a7-kube-api-access-x9m95\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.609177 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-dns-svc\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.609241 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-nb\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.609177 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-dns-svc\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.609935 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-nb\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.610025 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-config\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.610269 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-sb\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.630706 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9m95\" (UniqueName: \"kubernetes.io/projected/fb991421-a937-4891-b21e-cdd66b1675a7-kube-api-access-x9m95\") pod \"dnsmasq-dns-5ddf5b9dd7-m7g85\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") " pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:14 crc kubenswrapper[4861]: I0129 08:13:14.704844 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:15 crc kubenswrapper[4861]: I0129 08:13:15.101189 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lvjhf" event={"ID":"1d8fe23c-9df2-4779-86f1-49056e1e790f","Type":"ContainerStarted","Data":"f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2"} Jan 29 08:13:15 crc kubenswrapper[4861]: I0129 08:13:15.235508 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ddf5b9dd7-m7g85"] Jan 29 08:13:16 crc kubenswrapper[4861]: I0129 08:13:16.110060 4861 generic.go:334] "Generic (PLEG): container finished" podID="fb991421-a937-4891-b21e-cdd66b1675a7" containerID="9d80a4c15f31b5c560542498ca3b6784534955c005e9bcceada650f392d0f1af" exitCode=0 Jan 29 08:13:16 crc kubenswrapper[4861]: I0129 08:13:16.110133 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" event={"ID":"fb991421-a937-4891-b21e-cdd66b1675a7","Type":"ContainerDied","Data":"9d80a4c15f31b5c560542498ca3b6784534955c005e9bcceada650f392d0f1af"} Jan 29 08:13:16 crc kubenswrapper[4861]: I0129 08:13:16.110358 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" event={"ID":"fb991421-a937-4891-b21e-cdd66b1675a7","Type":"ContainerStarted","Data":"1187989f3b03492bc02ef62fdcfb3689d949a3dfc7d6a1fedbb0ccd5ccc5d617"} Jan 29 08:13:16 crc kubenswrapper[4861]: I0129 08:13:16.112764 4861 generic.go:334] "Generic (PLEG): container finished" podID="1d8fe23c-9df2-4779-86f1-49056e1e790f" containerID="f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2" exitCode=0 Jan 29 08:13:16 crc kubenswrapper[4861]: I0129 08:13:16.112847 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lvjhf" event={"ID":"1d8fe23c-9df2-4779-86f1-49056e1e790f","Type":"ContainerDied","Data":"f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2"} Jan 29 08:13:16 crc kubenswrapper[4861]: I0129 08:13:16.793213 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:13:17 crc kubenswrapper[4861]: I0129 08:13:17.129669 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="41709de8-67d8-4cef-867c-fc819670b416" containerName="nova-api-log" containerID="cri-o://022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db" gracePeriod=30 Jan 29 08:13:17 crc kubenswrapper[4861]: I0129 08:13:17.130671 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="41709de8-67d8-4cef-867c-fc819670b416" containerName="nova-api-api" containerID="cri-o://acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f" gracePeriod=30 Jan 29 08:13:17 crc kubenswrapper[4861]: I0129 08:13:17.133383 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 08:13:17 crc kubenswrapper[4861]: I0129 08:13:17.133410 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" event={"ID":"fb991421-a937-4891-b21e-cdd66b1675a7","Type":"ContainerStarted","Data":"0877304d792f3e9b53045562e0c60472b6aec3900804c3d2ffa2a76222b55324"} Jan 29 08:13:17 crc kubenswrapper[4861]: I0129 08:13:17.133424 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lvjhf" 
event={"ID":"1d8fe23c-9df2-4779-86f1-49056e1e790f","Type":"ContainerStarted","Data":"b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9"} Jan 29 08:13:17 crc kubenswrapper[4861]: I0129 08:13:17.172654 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" podStartSLOduration=3.172635245 podStartE2EDuration="3.172635245s" podCreationTimestamp="2026-01-29 08:13:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:13:17.16255742 +0000 UTC m=+5888.834051987" watchObservedRunningTime="2026-01-29 08:13:17.172635245 +0000 UTC m=+5888.844129812" Jan 29 08:13:17 crc kubenswrapper[4861]: I0129 08:13:17.199620 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lvjhf" podStartSLOduration=2.740671678 podStartE2EDuration="5.199598454s" podCreationTimestamp="2026-01-29 08:13:12 +0000 UTC" firstStartedPulling="2026-01-29 08:13:14.09357507 +0000 UTC m=+5885.765069637" lastFinishedPulling="2026-01-29 08:13:16.552501856 +0000 UTC m=+5888.223996413" observedRunningTime="2026-01-29 08:13:17.181369135 +0000 UTC m=+5888.852863702" watchObservedRunningTime="2026-01-29 08:13:17.199598454 +0000 UTC m=+5888.871093021" Jan 29 08:13:18 crc kubenswrapper[4861]: I0129 08:13:18.140135 4861 generic.go:334] "Generic (PLEG): container finished" podID="41709de8-67d8-4cef-867c-fc819670b416" containerID="022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db" exitCode=143 Jan 29 08:13:18 crc kubenswrapper[4861]: I0129 08:13:18.140220 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"41709de8-67d8-4cef-867c-fc819670b416","Type":"ContainerDied","Data":"022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db"} Jan 29 08:13:20 crc kubenswrapper[4861]: I0129 08:13:20.772662 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:13:20 crc kubenswrapper[4861]: I0129 08:13:20.929724 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-config-data\") pod \"41709de8-67d8-4cef-867c-fc819670b416\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " Jan 29 08:13:20 crc kubenswrapper[4861]: I0129 08:13:20.930426 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41709de8-67d8-4cef-867c-fc819670b416-logs\") pod \"41709de8-67d8-4cef-867c-fc819670b416\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " Jan 29 08:13:20 crc kubenswrapper[4861]: I0129 08:13:20.930595 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rffwd\" (UniqueName: \"kubernetes.io/projected/41709de8-67d8-4cef-867c-fc819670b416-kube-api-access-rffwd\") pod \"41709de8-67d8-4cef-867c-fc819670b416\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " Jan 29 08:13:20 crc kubenswrapper[4861]: I0129 08:13:20.930836 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-combined-ca-bundle\") pod \"41709de8-67d8-4cef-867c-fc819670b416\" (UID: \"41709de8-67d8-4cef-867c-fc819670b416\") " Jan 29 08:13:20 crc kubenswrapper[4861]: I0129 08:13:20.931635 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41709de8-67d8-4cef-867c-fc819670b416-logs" (OuterVolumeSpecName: "logs") pod "41709de8-67d8-4cef-867c-fc819670b416" (UID: "41709de8-67d8-4cef-867c-fc819670b416"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:13:20 crc kubenswrapper[4861]: I0129 08:13:20.938378 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41709de8-67d8-4cef-867c-fc819670b416-kube-api-access-rffwd" (OuterVolumeSpecName: "kube-api-access-rffwd") pod "41709de8-67d8-4cef-867c-fc819670b416" (UID: "41709de8-67d8-4cef-867c-fc819670b416"). InnerVolumeSpecName "kube-api-access-rffwd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:13:20 crc kubenswrapper[4861]: I0129 08:13:20.962115 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-config-data" (OuterVolumeSpecName: "config-data") pod "41709de8-67d8-4cef-867c-fc819670b416" (UID: "41709de8-67d8-4cef-867c-fc819670b416"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:13:20 crc kubenswrapper[4861]: I0129 08:13:20.970887 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41709de8-67d8-4cef-867c-fc819670b416" (UID: "41709de8-67d8-4cef-867c-fc819670b416"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.033270 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.033350 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41709de8-67d8-4cef-867c-fc819670b416-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.033361 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rffwd\" (UniqueName: \"kubernetes.io/projected/41709de8-67d8-4cef-867c-fc819670b416-kube-api-access-rffwd\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.033371 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41709de8-67d8-4cef-867c-fc819670b416-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.177579 4861 generic.go:334] "Generic (PLEG): container finished" podID="41709de8-67d8-4cef-867c-fc819670b416" containerID="acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f" exitCode=0 Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.177654 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"41709de8-67d8-4cef-867c-fc819670b416","Type":"ContainerDied","Data":"acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f"} Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.177902 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"41709de8-67d8-4cef-867c-fc819670b416","Type":"ContainerDied","Data":"5a7ace8ca5bfe563ef65a822134fec3fe11c30d50286bf2692505b6c6bbca7f5"} Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.177928 4861 scope.go:117] "RemoveContainer" containerID="acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.177738 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.229988 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.238412 4861 scope.go:117] "RemoveContainer" containerID="022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.244689 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.255663 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 08:13:21 crc kubenswrapper[4861]: E0129 08:13:21.256386 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41709de8-67d8-4cef-867c-fc819670b416" containerName="nova-api-log" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.256412 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="41709de8-67d8-4cef-867c-fc819670b416" containerName="nova-api-log" Jan 29 08:13:21 crc kubenswrapper[4861]: E0129 08:13:21.256464 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41709de8-67d8-4cef-867c-fc819670b416" containerName="nova-api-api" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.256477 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="41709de8-67d8-4cef-867c-fc819670b416" containerName="nova-api-api" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.256806 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="41709de8-67d8-4cef-867c-fc819670b416" containerName="nova-api-log" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.256830 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="41709de8-67d8-4cef-867c-fc819670b416" containerName="nova-api-api" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.259708 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.264456 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.264758 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.264961 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.265325 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.292646 4861 scope.go:117] "RemoveContainer" containerID="acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f" Jan 29 08:13:21 crc kubenswrapper[4861]: E0129 08:13:21.293054 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f\": container with ID starting with acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f not found: ID does not exist" containerID="acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.293098 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f"} err="failed to get container status \"acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f\": rpc error: code = NotFound desc = could not find container \"acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f\": container with ID starting with acf42349840a0e4aa020c25e658a0fbf4368a7a4f75e774a363e118299d37e0f not found: ID does not exist" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.293117 4861 scope.go:117] "RemoveContainer" containerID="022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db" Jan 29 08:13:21 crc kubenswrapper[4861]: E0129 08:13:21.293353 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db\": container with ID starting with 022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db not found: ID does not exist" containerID="022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.293376 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db"} err="failed to get container status \"022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db\": rpc error: code = NotFound desc = could not find container \"022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db\": container with ID starting with 022cd4b6b670f6d07507a496ddbb8ab8812312e20dc7e2663d491d0e9122c4db not found: ID does not exist" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.346282 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 
08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.346516 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dp9f8\" (UniqueName: \"kubernetes.io/projected/14c47dd3-3866-4834-b719-5f8494904ea4-kube-api-access-dp9f8\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.346627 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14c47dd3-3866-4834-b719-5f8494904ea4-logs\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.346904 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.346951 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-public-tls-certs\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.347036 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-config-data\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.448835 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dp9f8\" (UniqueName: \"kubernetes.io/projected/14c47dd3-3866-4834-b719-5f8494904ea4-kube-api-access-dp9f8\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.448999 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14c47dd3-3866-4834-b719-5f8494904ea4-logs\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.449185 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.449293 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-public-tls-certs\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.449443 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14c47dd3-3866-4834-b719-5f8494904ea4-logs\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " 
pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.449407 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-config-data\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.449697 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.455280 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-config-data\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.455522 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-public-tls-certs\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.457343 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.461006 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.465651 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dp9f8\" (UniqueName: \"kubernetes.io/projected/14c47dd3-3866-4834-b719-5f8494904ea4-kube-api-access-dp9f8\") pod \"nova-api-0\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " pod="openstack/nova-api-0" Jan 29 08:13:21 crc kubenswrapper[4861]: I0129 08:13:21.589907 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:13:22 crc kubenswrapper[4861]: W0129 08:13:22.099263 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14c47dd3_3866_4834_b719_5f8494904ea4.slice/crio-b48117a68fccef866a6daa85deed4d6fa75cd0de31a894035cbbda9c45f121a5 WatchSource:0}: Error finding container b48117a68fccef866a6daa85deed4d6fa75cd0de31a894035cbbda9c45f121a5: Status 404 returned error can't find the container with id b48117a68fccef866a6daa85deed4d6fa75cd0de31a894035cbbda9c45f121a5 Jan 29 08:13:22 crc kubenswrapper[4861]: I0129 08:13:22.102656 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:13:22 crc kubenswrapper[4861]: I0129 08:13:22.187272 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"14c47dd3-3866-4834-b719-5f8494904ea4","Type":"ContainerStarted","Data":"b48117a68fccef866a6daa85deed4d6fa75cd0de31a894035cbbda9c45f121a5"} Jan 29 08:13:22 crc kubenswrapper[4861]: I0129 08:13:22.780247 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:22 crc kubenswrapper[4861]: I0129 08:13:22.780703 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:22 crc kubenswrapper[4861]: I0129 08:13:22.835674 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:23 crc kubenswrapper[4861]: I0129 08:13:23.127806 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41709de8-67d8-4cef-867c-fc819670b416" path="/var/lib/kubelet/pods/41709de8-67d8-4cef-867c-fc819670b416/volumes" Jan 29 08:13:23 crc kubenswrapper[4861]: I0129 08:13:23.211882 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"14c47dd3-3866-4834-b719-5f8494904ea4","Type":"ContainerStarted","Data":"e894601e5fef0d614b3a9c749f0cd44610e778b1b1c3366627e68905994e0908"} Jan 29 08:13:23 crc kubenswrapper[4861]: I0129 08:13:23.211960 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"14c47dd3-3866-4834-b719-5f8494904ea4","Type":"ContainerStarted","Data":"8d4806e15389b2f60659d50931356fc913f5b09fbe06a4580139e0adf4ebff4c"} Jan 29 08:13:23 crc kubenswrapper[4861]: I0129 08:13:23.242838 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.242822074 podStartE2EDuration="2.242822074s" podCreationTimestamp="2026-01-29 08:13:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:13:23.238067919 +0000 UTC m=+5894.909562486" watchObservedRunningTime="2026-01-29 08:13:23.242822074 +0000 UTC m=+5894.914316631" Jan 29 08:13:23 crc kubenswrapper[4861]: I0129 08:13:23.283836 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:23 crc kubenswrapper[4861]: I0129 08:13:23.361165 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lvjhf"] Jan 29 08:13:24 crc kubenswrapper[4861]: I0129 08:13:24.706434 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" Jan 29 
08:13:24 crc kubenswrapper[4861]: I0129 08:13:24.809176 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54785db6bc-7bmkn"] Jan 29 08:13:24 crc kubenswrapper[4861]: I0129 08:13:24.809600 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" podUID="d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" containerName="dnsmasq-dns" containerID="cri-o://d637967963a5e919f7aeade81fa19b4ecbd898f64b875ea830d9754fcb7a8d62" gracePeriod=10 Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.236165 4861 generic.go:334] "Generic (PLEG): container finished" podID="d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" containerID="d637967963a5e919f7aeade81fa19b4ecbd898f64b875ea830d9754fcb7a8d62" exitCode=0 Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.236228 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" event={"ID":"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9","Type":"ContainerDied","Data":"d637967963a5e919f7aeade81fa19b4ecbd898f64b875ea830d9754fcb7a8d62"} Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.236558 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" event={"ID":"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9","Type":"ContainerDied","Data":"ea6618d052b830a707fc7e4fff10038a266d07d620397b8901770d2934ff522c"} Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.236576 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea6618d052b830a707fc7e4fff10038a266d07d620397b8901770d2934ff522c" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.236740 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lvjhf" podUID="1d8fe23c-9df2-4779-86f1-49056e1e790f" containerName="registry-server" containerID="cri-o://b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9" gracePeriod=2 Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.414392 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.454326 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-dns-svc\") pod \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.454467 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-config\") pod \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.454586 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-nb\") pod \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.454664 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-sb\") pod \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.454712 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqqp2\" (UniqueName: \"kubernetes.io/projected/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-kube-api-access-bqqp2\") pod \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\" (UID: \"d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9\") " Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.477840 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-kube-api-access-bqqp2" (OuterVolumeSpecName: "kube-api-access-bqqp2") pod "d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" (UID: "d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9"). InnerVolumeSpecName "kube-api-access-bqqp2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.514216 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" (UID: "d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.537119 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-config" (OuterVolumeSpecName: "config") pod "d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" (UID: "d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.538917 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" (UID: "d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.539137 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" (UID: "d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.557268 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.557306 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.557321 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqqp2\" (UniqueName: \"kubernetes.io/projected/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-kube-api-access-bqqp2\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.557335 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.557349 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.626398 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.658507 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tc6jv\" (UniqueName: \"kubernetes.io/projected/1d8fe23c-9df2-4779-86f1-49056e1e790f-kube-api-access-tc6jv\") pod \"1d8fe23c-9df2-4779-86f1-49056e1e790f\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.658593 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-catalog-content\") pod \"1d8fe23c-9df2-4779-86f1-49056e1e790f\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.658718 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-utilities\") pod \"1d8fe23c-9df2-4779-86f1-49056e1e790f\" (UID: \"1d8fe23c-9df2-4779-86f1-49056e1e790f\") " Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.661168 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-utilities" (OuterVolumeSpecName: "utilities") pod "1d8fe23c-9df2-4779-86f1-49056e1e790f" (UID: "1d8fe23c-9df2-4779-86f1-49056e1e790f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.663992 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d8fe23c-9df2-4779-86f1-49056e1e790f-kube-api-access-tc6jv" (OuterVolumeSpecName: "kube-api-access-tc6jv") pod "1d8fe23c-9df2-4779-86f1-49056e1e790f" (UID: "1d8fe23c-9df2-4779-86f1-49056e1e790f"). InnerVolumeSpecName "kube-api-access-tc6jv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.761361 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tc6jv\" (UniqueName: \"kubernetes.io/projected/1d8fe23c-9df2-4779-86f1-49056e1e790f-kube-api-access-tc6jv\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.761400 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.934625 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d8fe23c-9df2-4779-86f1-49056e1e790f" (UID: "1d8fe23c-9df2-4779-86f1-49056e1e790f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:13:25 crc kubenswrapper[4861]: I0129 08:13:25.964002 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d8fe23c-9df2-4779-86f1-49056e1e790f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.118987 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:13:26 crc kubenswrapper[4861]: E0129 08:13:26.119377 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.253222 4861 generic.go:334] "Generic (PLEG): container finished" podID="1d8fe23c-9df2-4779-86f1-49056e1e790f" containerID="b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9" exitCode=0 Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.253415 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lvjhf" event={"ID":"1d8fe23c-9df2-4779-86f1-49056e1e790f","Type":"ContainerDied","Data":"b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9"} Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.253660 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lvjhf" event={"ID":"1d8fe23c-9df2-4779-86f1-49056e1e790f","Type":"ContainerDied","Data":"0464ce9563a55dfb1389446552ae39fc1489e6def259f83bdd3a876c83cee6d8"} Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.253696 4861 scope.go:117] "RemoveContainer" containerID="b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9" Jan 29 08:13:26 crc 
kubenswrapper[4861]: I0129 08:13:26.253942 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54785db6bc-7bmkn" Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.253559 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lvjhf" Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.286870 4861 scope.go:117] "RemoveContainer" containerID="f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2" Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.334714 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lvjhf"] Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.346256 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lvjhf"] Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.356327 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54785db6bc-7bmkn"] Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.362149 4861 scope.go:117] "RemoveContainer" containerID="5cb34a6285c565a5eb98bbb9abd62a36dc72b9b12528d470510dbf4f8623371b" Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.364332 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54785db6bc-7bmkn"] Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.395586 4861 scope.go:117] "RemoveContainer" containerID="b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9" Jan 29 08:13:26 crc kubenswrapper[4861]: E0129 08:13:26.395921 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9\": container with ID starting with b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9 not found: ID does not exist" containerID="b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9" Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.395947 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9"} err="failed to get container status \"b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9\": rpc error: code = NotFound desc = could not find container \"b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9\": container with ID starting with b076f57c5db8bf592580536c26b2901205a7eca156ec5b5e383db689c05014f9 not found: ID does not exist" Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.395965 4861 scope.go:117] "RemoveContainer" containerID="f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2" Jan 29 08:13:26 crc kubenswrapper[4861]: E0129 08:13:26.396195 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2\": container with ID starting with f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2 not found: ID does not exist" containerID="f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2" Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.396212 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2"} err="failed to get container status 
\"f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2\": rpc error: code = NotFound desc = could not find container \"f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2\": container with ID starting with f2f7b92aaaa9904ce4b7cdc39d3d43c07cb1cbad5e985b5095f46974efcc43f2 not found: ID does not exist" Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.396225 4861 scope.go:117] "RemoveContainer" containerID="5cb34a6285c565a5eb98bbb9abd62a36dc72b9b12528d470510dbf4f8623371b" Jan 29 08:13:26 crc kubenswrapper[4861]: E0129 08:13:26.396500 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cb34a6285c565a5eb98bbb9abd62a36dc72b9b12528d470510dbf4f8623371b\": container with ID starting with 5cb34a6285c565a5eb98bbb9abd62a36dc72b9b12528d470510dbf4f8623371b not found: ID does not exist" containerID="5cb34a6285c565a5eb98bbb9abd62a36dc72b9b12528d470510dbf4f8623371b" Jan 29 08:13:26 crc kubenswrapper[4861]: I0129 08:13:26.396535 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cb34a6285c565a5eb98bbb9abd62a36dc72b9b12528d470510dbf4f8623371b"} err="failed to get container status \"5cb34a6285c565a5eb98bbb9abd62a36dc72b9b12528d470510dbf4f8623371b\": rpc error: code = NotFound desc = could not find container \"5cb34a6285c565a5eb98bbb9abd62a36dc72b9b12528d470510dbf4f8623371b\": container with ID starting with 5cb34a6285c565a5eb98bbb9abd62a36dc72b9b12528d470510dbf4f8623371b not found: ID does not exist" Jan 29 08:13:27 crc kubenswrapper[4861]: I0129 08:13:27.137060 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d8fe23c-9df2-4779-86f1-49056e1e790f" path="/var/lib/kubelet/pods/1d8fe23c-9df2-4779-86f1-49056e1e790f/volumes" Jan 29 08:13:27 crc kubenswrapper[4861]: I0129 08:13:27.138376 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" path="/var/lib/kubelet/pods/d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9/volumes" Jan 29 08:13:31 crc kubenswrapper[4861]: I0129 08:13:31.590590 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 08:13:31 crc kubenswrapper[4861]: I0129 08:13:31.591271 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 08:13:32 crc kubenswrapper[4861]: I0129 08:13:32.613490 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="14c47dd3-3866-4834-b719-5f8494904ea4" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.102:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 08:13:32 crc kubenswrapper[4861]: I0129 08:13:32.613792 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="14c47dd3-3866-4834-b719-5f8494904ea4" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.102:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 08:13:39 crc kubenswrapper[4861]: I0129 08:13:39.131341 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:13:39 crc kubenswrapper[4861]: E0129 08:13:39.133758 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:13:41 crc kubenswrapper[4861]: I0129 08:13:41.597233 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 08:13:41 crc kubenswrapper[4861]: I0129 08:13:41.598023 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 08:13:41 crc kubenswrapper[4861]: I0129 08:13:41.598614 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 08:13:41 crc kubenswrapper[4861]: I0129 08:13:41.604770 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 08:13:42 crc kubenswrapper[4861]: I0129 08:13:42.424533 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 08:13:42 crc kubenswrapper[4861]: I0129 08:13:42.436466 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 08:13:45 crc kubenswrapper[4861]: I0129 08:13:45.057796 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bf8c-account-create-update-h5fb5"] Jan 29 08:13:45 crc kubenswrapper[4861]: I0129 08:13:45.067722 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-7pslv"] Jan 29 08:13:45 crc kubenswrapper[4861]: I0129 08:13:45.077465 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bf8c-account-create-update-h5fb5"] Jan 29 08:13:45 crc kubenswrapper[4861]: I0129 08:13:45.090765 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-7pslv"] Jan 29 08:13:45 crc kubenswrapper[4861]: I0129 08:13:45.125722 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af4e8282-a6ea-40e8-bdb6-72115771f88e" path="/var/lib/kubelet/pods/af4e8282-a6ea-40e8-bdb6-72115771f88e/volumes" Jan 29 08:13:45 crc kubenswrapper[4861]: I0129 08:13:45.126372 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d06c6849-8f0a-4626-9ae3-1922b1196771" path="/var/lib/kubelet/pods/d06c6849-8f0a-4626-9ae3-1922b1196771/volumes" Jan 29 08:13:52 crc kubenswrapper[4861]: I0129 08:13:52.043686 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-fb5p5"] Jan 29 08:13:52 crc kubenswrapper[4861]: I0129 08:13:52.053450 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-fb5p5"] Jan 29 08:13:52 crc kubenswrapper[4861]: I0129 08:13:52.116996 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:13:52 crc kubenswrapper[4861]: E0129 08:13:52.117437 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:13:53 crc kubenswrapper[4861]: I0129 08:13:53.126043 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="ed95aa8e-dd5c-4fb8-b415-03b895499221" path="/var/lib/kubelet/pods/ed95aa8e-dd5c-4fb8-b415-03b895499221/volumes" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.914296 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-s62c5"] Jan 29 08:14:00 crc kubenswrapper[4861]: E0129 08:14:00.917323 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" containerName="dnsmasq-dns" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.917370 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" containerName="dnsmasq-dns" Jan 29 08:14:00 crc kubenswrapper[4861]: E0129 08:14:00.917385 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d8fe23c-9df2-4779-86f1-49056e1e790f" containerName="extract-content" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.917392 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d8fe23c-9df2-4779-86f1-49056e1e790f" containerName="extract-content" Jan 29 08:14:00 crc kubenswrapper[4861]: E0129 08:14:00.917406 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d8fe23c-9df2-4779-86f1-49056e1e790f" containerName="registry-server" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.917412 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d8fe23c-9df2-4779-86f1-49056e1e790f" containerName="registry-server" Jan 29 08:14:00 crc kubenswrapper[4861]: E0129 08:14:00.917435 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" containerName="init" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.917442 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" containerName="init" Jan 29 08:14:00 crc kubenswrapper[4861]: E0129 08:14:00.917452 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d8fe23c-9df2-4779-86f1-49056e1e790f" containerName="extract-utilities" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.917458 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d8fe23c-9df2-4779-86f1-49056e1e790f" containerName="extract-utilities" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.917742 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d8fe23c-9df2-4779-86f1-49056e1e790f" containerName="registry-server" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.917762 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2b77b63-fe00-4af0-8cbd-18be3d2a0cd9" containerName="dnsmasq-dns" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.918462 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-s62c5" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.920702 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-jgg2l" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.920965 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.923468 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.940138 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-kqmjx"] Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.943299 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.956428 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s62c5"] Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.969991 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-kqmjx"] Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.987397 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/09c23e71-bf8c-49fc-a0bf-85ff216a6190-var-run\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.987456 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/09c23e71-bf8c-49fc-a0bf-85ff216a6190-scripts\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.987523 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/09c23e71-bf8c-49fc-a0bf-85ff216a6190-var-log-ovn\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.987641 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/09c23e71-bf8c-49fc-a0bf-85ff216a6190-var-run-ovn\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.987742 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sf629\" (UniqueName: \"kubernetes.io/projected/09c23e71-bf8c-49fc-a0bf-85ff216a6190-kube-api-access-sf629\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.987821 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09c23e71-bf8c-49fc-a0bf-85ff216a6190-combined-ca-bundle\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " 
pod="openstack/ovn-controller-s62c5" Jan 29 08:14:00 crc kubenswrapper[4861]: I0129 08:14:00.987866 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/09c23e71-bf8c-49fc-a0bf-85ff216a6190-ovn-controller-tls-certs\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.089286 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxvwn\" (UniqueName: \"kubernetes.io/projected/0fd778e8-7702-4fc2-9af6-e06ad0631dce-kube-api-access-qxvwn\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.089342 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-etc-ovs\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.089373 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/09c23e71-bf8c-49fc-a0bf-85ff216a6190-var-run-ovn\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.089563 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-var-lib\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.089608 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-var-run\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.089660 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/09c23e71-bf8c-49fc-a0bf-85ff216a6190-var-run-ovn\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.089739 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sf629\" (UniqueName: \"kubernetes.io/projected/09c23e71-bf8c-49fc-a0bf-85ff216a6190-kube-api-access-sf629\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.089863 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09c23e71-bf8c-49fc-a0bf-85ff216a6190-combined-ca-bundle\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: 
I0129 08:14:01.089921 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/09c23e71-bf8c-49fc-a0bf-85ff216a6190-ovn-controller-tls-certs\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.090001 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/09c23e71-bf8c-49fc-a0bf-85ff216a6190-var-run\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.090032 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/09c23e71-bf8c-49fc-a0bf-85ff216a6190-scripts\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.090168 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/09c23e71-bf8c-49fc-a0bf-85ff216a6190-var-log-ovn\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.090211 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/09c23e71-bf8c-49fc-a0bf-85ff216a6190-var-run\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.090241 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-var-log\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.090296 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0fd778e8-7702-4fc2-9af6-e06ad0631dce-scripts\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.090337 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/09c23e71-bf8c-49fc-a0bf-85ff216a6190-var-log-ovn\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.092507 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/09c23e71-bf8c-49fc-a0bf-85ff216a6190-scripts\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.095640 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/09c23e71-bf8c-49fc-a0bf-85ff216a6190-ovn-controller-tls-certs\") pod 
\"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.095850 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09c23e71-bf8c-49fc-a0bf-85ff216a6190-combined-ca-bundle\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.107008 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sf629\" (UniqueName: \"kubernetes.io/projected/09c23e71-bf8c-49fc-a0bf-85ff216a6190-kube-api-access-sf629\") pod \"ovn-controller-s62c5\" (UID: \"09c23e71-bf8c-49fc-a0bf-85ff216a6190\") " pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.192244 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-var-log\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.192638 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-var-log\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.192685 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0fd778e8-7702-4fc2-9af6-e06ad0631dce-scripts\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.192755 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxvwn\" (UniqueName: \"kubernetes.io/projected/0fd778e8-7702-4fc2-9af6-e06ad0631dce-kube-api-access-qxvwn\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.192884 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-etc-ovs\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.193005 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-etc-ovs\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.193131 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-var-lib\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.193161 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-var-run\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.193607 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-var-lib\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.193959 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/0fd778e8-7702-4fc2-9af6-e06ad0631dce-var-run\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.194685 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0fd778e8-7702-4fc2-9af6-e06ad0631dce-scripts\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.214612 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxvwn\" (UniqueName: \"kubernetes.io/projected/0fd778e8-7702-4fc2-9af6-e06ad0631dce-kube-api-access-qxvwn\") pod \"ovn-controller-ovs-kqmjx\" (UID: \"0fd778e8-7702-4fc2-9af6-e06ad0631dce\") " pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.291792 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s62c5" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.301696 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:01 crc kubenswrapper[4861]: I0129 08:14:01.783610 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s62c5"] Jan 29 08:14:02 crc kubenswrapper[4861]: I0129 08:14:02.206970 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-kqmjx"] Jan 29 08:14:02 crc kubenswrapper[4861]: I0129 08:14:02.625008 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kqmjx" event={"ID":"0fd778e8-7702-4fc2-9af6-e06ad0631dce","Type":"ContainerStarted","Data":"1d22a242ca5920142abf3c05ed926bd22c33dbc6c4da941f69a20677e259115d"} Jan 29 08:14:02 crc kubenswrapper[4861]: I0129 08:14:02.625417 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kqmjx" event={"ID":"0fd778e8-7702-4fc2-9af6-e06ad0631dce","Type":"ContainerStarted","Data":"3f578d63e171c35cce3102ec5c67f46b4a3cded4998dbc9c2c8a5616ee4b54fa"} Jan 29 08:14:02 crc kubenswrapper[4861]: I0129 08:14:02.629054 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5" event={"ID":"09c23e71-bf8c-49fc-a0bf-85ff216a6190","Type":"ContainerStarted","Data":"6773430402b3fc24536db7feb95408430e840f29ced70c3dedb09620ec7e7a5a"} Jan 29 08:14:02 crc kubenswrapper[4861]: I0129 08:14:02.629629 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5" event={"ID":"09c23e71-bf8c-49fc-a0bf-85ff216a6190","Type":"ContainerStarted","Data":"90c8b1910640d8d0c0042b9cc7f99614320f0a7ee50c001af0031fdfc1d22a6b"} Jan 29 08:14:02 crc kubenswrapper[4861]: I0129 08:14:02.629671 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-s62c5" Jan 29 08:14:02 crc kubenswrapper[4861]: I0129 08:14:02.698844 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-s62c5" podStartSLOduration=2.698822481 podStartE2EDuration="2.698822481s" podCreationTimestamp="2026-01-29 08:14:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:14:02.687787551 +0000 UTC m=+5934.359282118" watchObservedRunningTime="2026-01-29 08:14:02.698822481 +0000 UTC m=+5934.370317048" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.583428 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-v597l"] Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.585043 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.591448 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.599501 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-v597l"] Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.647883 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d6dc2d4f-238b-499b-9df1-6f2875854172-ovs-rundir\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.647934 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6dc2d4f-238b-499b-9df1-6f2875854172-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.648025 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d6dc2d4f-238b-499b-9df1-6f2875854172-ovn-rundir\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.648099 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6dc2d4f-238b-499b-9df1-6f2875854172-combined-ca-bundle\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.648154 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xvf2\" (UniqueName: \"kubernetes.io/projected/d6dc2d4f-238b-499b-9df1-6f2875854172-kube-api-access-7xvf2\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.648191 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6dc2d4f-238b-499b-9df1-6f2875854172-config\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.651611 4861 generic.go:334] "Generic (PLEG): container finished" podID="0fd778e8-7702-4fc2-9af6-e06ad0631dce" containerID="1d22a242ca5920142abf3c05ed926bd22c33dbc6c4da941f69a20677e259115d" exitCode=0 Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.651761 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kqmjx" event={"ID":"0fd778e8-7702-4fc2-9af6-e06ad0631dce","Type":"ContainerDied","Data":"1d22a242ca5920142abf3c05ed926bd22c33dbc6c4da941f69a20677e259115d"} Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.750717 4861 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d6dc2d4f-238b-499b-9df1-6f2875854172-ovs-rundir\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.750766 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6dc2d4f-238b-499b-9df1-6f2875854172-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.750838 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d6dc2d4f-238b-499b-9df1-6f2875854172-ovn-rundir\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.750894 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6dc2d4f-238b-499b-9df1-6f2875854172-combined-ca-bundle\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.750931 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xvf2\" (UniqueName: \"kubernetes.io/projected/d6dc2d4f-238b-499b-9df1-6f2875854172-kube-api-access-7xvf2\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.750956 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6dc2d4f-238b-499b-9df1-6f2875854172-config\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.752224 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d6dc2d4f-238b-499b-9df1-6f2875854172-ovs-rundir\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.752781 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d6dc2d4f-238b-499b-9df1-6f2875854172-ovn-rundir\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.753099 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6dc2d4f-238b-499b-9df1-6f2875854172-config\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.757625 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d6dc2d4f-238b-499b-9df1-6f2875854172-combined-ca-bundle\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.757780 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6dc2d4f-238b-499b-9df1-6f2875854172-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.777693 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xvf2\" (UniqueName: \"kubernetes.io/projected/d6dc2d4f-238b-499b-9df1-6f2875854172-kube-api-access-7xvf2\") pod \"ovn-controller-metrics-v597l\" (UID: \"d6dc2d4f-238b-499b-9df1-6f2875854172\") " pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:03 crc kubenswrapper[4861]: I0129 08:14:03.910000 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-v597l" Jan 29 08:14:04 crc kubenswrapper[4861]: I0129 08:14:04.415417 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-v597l"] Jan 29 08:14:04 crc kubenswrapper[4861]: W0129 08:14:04.418589 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6dc2d4f_238b_499b_9df1_6f2875854172.slice/crio-1ec399de072428eca94df9fa4d0c735ceb300d7c546a01cc23749556c6c89574 WatchSource:0}: Error finding container 1ec399de072428eca94df9fa4d0c735ceb300d7c546a01cc23749556c6c89574: Status 404 returned error can't find the container with id 1ec399de072428eca94df9fa4d0c735ceb300d7c546a01cc23749556c6c89574 Jan 29 08:14:04 crc kubenswrapper[4861]: I0129 08:14:04.660891 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-v597l" event={"ID":"d6dc2d4f-238b-499b-9df1-6f2875854172","Type":"ContainerStarted","Data":"1ec399de072428eca94df9fa4d0c735ceb300d7c546a01cc23749556c6c89574"} Jan 29 08:14:04 crc kubenswrapper[4861]: I0129 08:14:04.664623 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kqmjx" event={"ID":"0fd778e8-7702-4fc2-9af6-e06ad0631dce","Type":"ContainerStarted","Data":"76c1a60a5cda1eb5c9bef3b516ce6e31a080ffa4289f2b30233a0751c1b6e19c"} Jan 29 08:14:04 crc kubenswrapper[4861]: I0129 08:14:04.664782 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kqmjx" event={"ID":"0fd778e8-7702-4fc2-9af6-e06ad0631dce","Type":"ContainerStarted","Data":"7a5f13b2fbd1d3b1fd278cd4995a18170f676c5c7b4447f14860f5a5d33e6094"} Jan 29 08:14:04 crc kubenswrapper[4861]: I0129 08:14:04.664858 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:04 crc kubenswrapper[4861]: I0129 08:14:04.693919 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-kqmjx" podStartSLOduration=4.693902299 podStartE2EDuration="4.693902299s" podCreationTimestamp="2026-01-29 08:14:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:14:04.688938448 +0000 UTC m=+5936.360433005" watchObservedRunningTime="2026-01-29 08:14:04.693902299 +0000 UTC 
m=+5936.365396856" Jan 29 08:14:05 crc kubenswrapper[4861]: I0129 08:14:05.674973 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-v597l" event={"ID":"d6dc2d4f-238b-499b-9df1-6f2875854172","Type":"ContainerStarted","Data":"535a34c5b65e2fa8850a7fcc6659958e4536c2e27c1a86f30a27da1f161105d0"} Jan 29 08:14:05 crc kubenswrapper[4861]: I0129 08:14:05.675299 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:05 crc kubenswrapper[4861]: I0129 08:14:05.717683 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-v597l" podStartSLOduration=2.717661003 podStartE2EDuration="2.717661003s" podCreationTimestamp="2026-01-29 08:14:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:14:05.704714852 +0000 UTC m=+5937.376209419" watchObservedRunningTime="2026-01-29 08:14:05.717661003 +0000 UTC m=+5937.389155550" Jan 29 08:14:06 crc kubenswrapper[4861]: I0129 08:14:06.066204 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-l8bkz"] Jan 29 08:14:06 crc kubenswrapper[4861]: I0129 08:14:06.080718 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-l8bkz"] Jan 29 08:14:06 crc kubenswrapper[4861]: I0129 08:14:06.117031 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:14:06 crc kubenswrapper[4861]: E0129 08:14:06.117397 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:14:07 crc kubenswrapper[4861]: I0129 08:14:07.129948 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b25499ba-e788-480f-87c8-f6e8b2178236" path="/var/lib/kubelet/pods/b25499ba-e788-480f-87c8-f6e8b2178236/volumes" Jan 29 08:14:18 crc kubenswrapper[4861]: I0129 08:14:18.949528 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-create-glm78"] Jan 29 08:14:18 crc kubenswrapper[4861]: I0129 08:14:18.952003 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-glm78" Jan 29 08:14:18 crc kubenswrapper[4861]: I0129 08:14:18.960760 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-glm78"] Jan 29 08:14:18 crc kubenswrapper[4861]: I0129 08:14:18.999589 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z65sn\" (UniqueName: \"kubernetes.io/projected/399da928-5e53-4b19-8d3c-464215b26f81-kube-api-access-z65sn\") pod \"octavia-db-create-glm78\" (UID: \"399da928-5e53-4b19-8d3c-464215b26f81\") " pod="openstack/octavia-db-create-glm78" Jan 29 08:14:18 crc kubenswrapper[4861]: I0129 08:14:18.999655 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/399da928-5e53-4b19-8d3c-464215b26f81-operator-scripts\") pod \"octavia-db-create-glm78\" (UID: \"399da928-5e53-4b19-8d3c-464215b26f81\") " pod="openstack/octavia-db-create-glm78" Jan 29 08:14:19 crc kubenswrapper[4861]: I0129 08:14:19.101040 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z65sn\" (UniqueName: \"kubernetes.io/projected/399da928-5e53-4b19-8d3c-464215b26f81-kube-api-access-z65sn\") pod \"octavia-db-create-glm78\" (UID: \"399da928-5e53-4b19-8d3c-464215b26f81\") " pod="openstack/octavia-db-create-glm78" Jan 29 08:14:19 crc kubenswrapper[4861]: I0129 08:14:19.101129 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/399da928-5e53-4b19-8d3c-464215b26f81-operator-scripts\") pod \"octavia-db-create-glm78\" (UID: \"399da928-5e53-4b19-8d3c-464215b26f81\") " pod="openstack/octavia-db-create-glm78" Jan 29 08:14:19 crc kubenswrapper[4861]: I0129 08:14:19.102046 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/399da928-5e53-4b19-8d3c-464215b26f81-operator-scripts\") pod \"octavia-db-create-glm78\" (UID: \"399da928-5e53-4b19-8d3c-464215b26f81\") " pod="openstack/octavia-db-create-glm78" Jan 29 08:14:19 crc kubenswrapper[4861]: I0129 08:14:19.132141 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z65sn\" (UniqueName: \"kubernetes.io/projected/399da928-5e53-4b19-8d3c-464215b26f81-kube-api-access-z65sn\") pod \"octavia-db-create-glm78\" (UID: \"399da928-5e53-4b19-8d3c-464215b26f81\") " pod="openstack/octavia-db-create-glm78" Jan 29 08:14:19 crc kubenswrapper[4861]: I0129 08:14:19.308853 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-glm78" Jan 29 08:14:19 crc kubenswrapper[4861]: I0129 08:14:19.761048 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-glm78"] Jan 29 08:14:19 crc kubenswrapper[4861]: W0129 08:14:19.762993 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod399da928_5e53_4b19_8d3c_464215b26f81.slice/crio-b8163f74d58c808595311ebe10877941d590ac4641f4ac358e70c30c99ce8e0f WatchSource:0}: Error finding container b8163f74d58c808595311ebe10877941d590ac4641f4ac358e70c30c99ce8e0f: Status 404 returned error can't find the container with id b8163f74d58c808595311ebe10877941d590ac4641f4ac358e70c30c99ce8e0f Jan 29 08:14:19 crc kubenswrapper[4861]: I0129 08:14:19.857410 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-glm78" event={"ID":"399da928-5e53-4b19-8d3c-464215b26f81","Type":"ContainerStarted","Data":"b8163f74d58c808595311ebe10877941d590ac4641f4ac358e70c30c99ce8e0f"} Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.405850 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-09f1-account-create-update-xxwpk"] Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.408430 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-09f1-account-create-update-xxwpk" Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.412952 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-db-secret" Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.438828 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-09f1-account-create-update-xxwpk"] Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.531737 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hmvg\" (UniqueName: \"kubernetes.io/projected/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-kube-api-access-5hmvg\") pod \"octavia-09f1-account-create-update-xxwpk\" (UID: \"f50d96e4-61f0-48b2-af43-0f8401cd2bcb\") " pod="openstack/octavia-09f1-account-create-update-xxwpk" Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.532193 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-operator-scripts\") pod \"octavia-09f1-account-create-update-xxwpk\" (UID: \"f50d96e4-61f0-48b2-af43-0f8401cd2bcb\") " pod="openstack/octavia-09f1-account-create-update-xxwpk" Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.633958 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-operator-scripts\") pod \"octavia-09f1-account-create-update-xxwpk\" (UID: \"f50d96e4-61f0-48b2-af43-0f8401cd2bcb\") " pod="openstack/octavia-09f1-account-create-update-xxwpk" Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.634181 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hmvg\" (UniqueName: \"kubernetes.io/projected/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-kube-api-access-5hmvg\") pod \"octavia-09f1-account-create-update-xxwpk\" (UID: \"f50d96e4-61f0-48b2-af43-0f8401cd2bcb\") " pod="openstack/octavia-09f1-account-create-update-xxwpk" Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 
08:14:20.635430 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-operator-scripts\") pod \"octavia-09f1-account-create-update-xxwpk\" (UID: \"f50d96e4-61f0-48b2-af43-0f8401cd2bcb\") " pod="openstack/octavia-09f1-account-create-update-xxwpk" Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.670017 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hmvg\" (UniqueName: \"kubernetes.io/projected/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-kube-api-access-5hmvg\") pod \"octavia-09f1-account-create-update-xxwpk\" (UID: \"f50d96e4-61f0-48b2-af43-0f8401cd2bcb\") " pod="openstack/octavia-09f1-account-create-update-xxwpk" Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.738990 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-09f1-account-create-update-xxwpk" Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.879847 4861 generic.go:334] "Generic (PLEG): container finished" podID="399da928-5e53-4b19-8d3c-464215b26f81" containerID="e65db8bedabb7aa4c88e2488663c9d8189453dd52c355a02369b4054a76b68cf" exitCode=0 Jan 29 08:14:20 crc kubenswrapper[4861]: I0129 08:14:20.880543 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-glm78" event={"ID":"399da928-5e53-4b19-8d3c-464215b26f81","Type":"ContainerDied","Data":"e65db8bedabb7aa4c88e2488663c9d8189453dd52c355a02369b4054a76b68cf"} Jan 29 08:14:21 crc kubenswrapper[4861]: I0129 08:14:21.116385 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:14:21 crc kubenswrapper[4861]: E0129 08:14:21.116770 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:14:21 crc kubenswrapper[4861]: I0129 08:14:21.242664 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-09f1-account-create-update-xxwpk"] Jan 29 08:14:21 crc kubenswrapper[4861]: W0129 08:14:21.247840 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf50d96e4_61f0_48b2_af43_0f8401cd2bcb.slice/crio-6c1caf5a415ab6c5d44a7bfa6b62b2e3f92ccea17aeed73514d069d76a67e0d6 WatchSource:0}: Error finding container 6c1caf5a415ab6c5d44a7bfa6b62b2e3f92ccea17aeed73514d069d76a67e0d6: Status 404 returned error can't find the container with id 6c1caf5a415ab6c5d44a7bfa6b62b2e3f92ccea17aeed73514d069d76a67e0d6 Jan 29 08:14:21 crc kubenswrapper[4861]: I0129 08:14:21.891455 4861 generic.go:334] "Generic (PLEG): container finished" podID="f50d96e4-61f0-48b2-af43-0f8401cd2bcb" containerID="42c5b350709293513348f6e0fcb9f1276f4ad764469c9e4d7b1672cfd9c3fe52" exitCode=0 Jan 29 08:14:21 crc kubenswrapper[4861]: I0129 08:14:21.891564 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-09f1-account-create-update-xxwpk" event={"ID":"f50d96e4-61f0-48b2-af43-0f8401cd2bcb","Type":"ContainerDied","Data":"42c5b350709293513348f6e0fcb9f1276f4ad764469c9e4d7b1672cfd9c3fe52"} Jan 29 08:14:21 crc kubenswrapper[4861]: 
I0129 08:14:21.891872 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-09f1-account-create-update-xxwpk" event={"ID":"f50d96e4-61f0-48b2-af43-0f8401cd2bcb","Type":"ContainerStarted","Data":"6c1caf5a415ab6c5d44a7bfa6b62b2e3f92ccea17aeed73514d069d76a67e0d6"} Jan 29 08:14:22 crc kubenswrapper[4861]: I0129 08:14:22.238371 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-glm78" Jan 29 08:14:22 crc kubenswrapper[4861]: I0129 08:14:22.365491 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z65sn\" (UniqueName: \"kubernetes.io/projected/399da928-5e53-4b19-8d3c-464215b26f81-kube-api-access-z65sn\") pod \"399da928-5e53-4b19-8d3c-464215b26f81\" (UID: \"399da928-5e53-4b19-8d3c-464215b26f81\") " Jan 29 08:14:22 crc kubenswrapper[4861]: I0129 08:14:22.365560 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/399da928-5e53-4b19-8d3c-464215b26f81-operator-scripts\") pod \"399da928-5e53-4b19-8d3c-464215b26f81\" (UID: \"399da928-5e53-4b19-8d3c-464215b26f81\") " Jan 29 08:14:22 crc kubenswrapper[4861]: I0129 08:14:22.366369 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/399da928-5e53-4b19-8d3c-464215b26f81-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "399da928-5e53-4b19-8d3c-464215b26f81" (UID: "399da928-5e53-4b19-8d3c-464215b26f81"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:14:22 crc kubenswrapper[4861]: I0129 08:14:22.372242 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/399da928-5e53-4b19-8d3c-464215b26f81-kube-api-access-z65sn" (OuterVolumeSpecName: "kube-api-access-z65sn") pod "399da928-5e53-4b19-8d3c-464215b26f81" (UID: "399da928-5e53-4b19-8d3c-464215b26f81"). InnerVolumeSpecName "kube-api-access-z65sn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:14:22 crc kubenswrapper[4861]: I0129 08:14:22.467641 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z65sn\" (UniqueName: \"kubernetes.io/projected/399da928-5e53-4b19-8d3c-464215b26f81-kube-api-access-z65sn\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:22 crc kubenswrapper[4861]: I0129 08:14:22.467848 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/399da928-5e53-4b19-8d3c-464215b26f81-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:22 crc kubenswrapper[4861]: I0129 08:14:22.902328 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-glm78" event={"ID":"399da928-5e53-4b19-8d3c-464215b26f81","Type":"ContainerDied","Data":"b8163f74d58c808595311ebe10877941d590ac4641f4ac358e70c30c99ce8e0f"} Jan 29 08:14:22 crc kubenswrapper[4861]: I0129 08:14:22.903186 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8163f74d58c808595311ebe10877941d590ac4641f4ac358e70c30c99ce8e0f" Jan 29 08:14:22 crc kubenswrapper[4861]: I0129 08:14:22.902360 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-glm78" Jan 29 08:14:23 crc kubenswrapper[4861]: I0129 08:14:23.214835 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-09f1-account-create-update-xxwpk" Jan 29 08:14:23 crc kubenswrapper[4861]: I0129 08:14:23.385944 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-operator-scripts\") pod \"f50d96e4-61f0-48b2-af43-0f8401cd2bcb\" (UID: \"f50d96e4-61f0-48b2-af43-0f8401cd2bcb\") " Jan 29 08:14:23 crc kubenswrapper[4861]: I0129 08:14:23.386339 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hmvg\" (UniqueName: \"kubernetes.io/projected/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-kube-api-access-5hmvg\") pod \"f50d96e4-61f0-48b2-af43-0f8401cd2bcb\" (UID: \"f50d96e4-61f0-48b2-af43-0f8401cd2bcb\") " Jan 29 08:14:23 crc kubenswrapper[4861]: I0129 08:14:23.386530 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f50d96e4-61f0-48b2-af43-0f8401cd2bcb" (UID: "f50d96e4-61f0-48b2-af43-0f8401cd2bcb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:14:23 crc kubenswrapper[4861]: I0129 08:14:23.387124 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:23 crc kubenswrapper[4861]: I0129 08:14:23.392473 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-kube-api-access-5hmvg" (OuterVolumeSpecName: "kube-api-access-5hmvg") pod "f50d96e4-61f0-48b2-af43-0f8401cd2bcb" (UID: "f50d96e4-61f0-48b2-af43-0f8401cd2bcb"). InnerVolumeSpecName "kube-api-access-5hmvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:14:23 crc kubenswrapper[4861]: I0129 08:14:23.488627 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hmvg\" (UniqueName: \"kubernetes.io/projected/f50d96e4-61f0-48b2-af43-0f8401cd2bcb-kube-api-access-5hmvg\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:23 crc kubenswrapper[4861]: I0129 08:14:23.911290 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-09f1-account-create-update-xxwpk" event={"ID":"f50d96e4-61f0-48b2-af43-0f8401cd2bcb","Type":"ContainerDied","Data":"6c1caf5a415ab6c5d44a7bfa6b62b2e3f92ccea17aeed73514d069d76a67e0d6"} Jan 29 08:14:23 crc kubenswrapper[4861]: I0129 08:14:23.911642 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c1caf5a415ab6c5d44a7bfa6b62b2e3f92ccea17aeed73514d069d76a67e0d6" Jan 29 08:14:23 crc kubenswrapper[4861]: I0129 08:14:23.911351 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-09f1-account-create-update-xxwpk" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.634632 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-persistence-db-create-mvwwh"] Jan 29 08:14:25 crc kubenswrapper[4861]: E0129 08:14:25.635247 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f50d96e4-61f0-48b2-af43-0f8401cd2bcb" containerName="mariadb-account-create-update" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.635269 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f50d96e4-61f0-48b2-af43-0f8401cd2bcb" containerName="mariadb-account-create-update" Jan 29 08:14:25 crc kubenswrapper[4861]: E0129 08:14:25.635342 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="399da928-5e53-4b19-8d3c-464215b26f81" containerName="mariadb-database-create" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.635357 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="399da928-5e53-4b19-8d3c-464215b26f81" containerName="mariadb-database-create" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.635957 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="399da928-5e53-4b19-8d3c-464215b26f81" containerName="mariadb-database-create" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.636032 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f50d96e4-61f0-48b2-af43-0f8401cd2bcb" containerName="mariadb-account-create-update" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.637229 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-mvwwh" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.648755 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-mvwwh"] Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.834274 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrtk5\" (UniqueName: \"kubernetes.io/projected/38e9508e-d706-4b9e-9d40-91535eb221c6-kube-api-access-vrtk5\") pod \"octavia-persistence-db-create-mvwwh\" (UID: \"38e9508e-d706-4b9e-9d40-91535eb221c6\") " pod="openstack/octavia-persistence-db-create-mvwwh" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.834900 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38e9508e-d706-4b9e-9d40-91535eb221c6-operator-scripts\") pod \"octavia-persistence-db-create-mvwwh\" (UID: \"38e9508e-d706-4b9e-9d40-91535eb221c6\") " pod="openstack/octavia-persistence-db-create-mvwwh" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.936405 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrtk5\" (UniqueName: \"kubernetes.io/projected/38e9508e-d706-4b9e-9d40-91535eb221c6-kube-api-access-vrtk5\") pod \"octavia-persistence-db-create-mvwwh\" (UID: \"38e9508e-d706-4b9e-9d40-91535eb221c6\") " pod="openstack/octavia-persistence-db-create-mvwwh" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.936505 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38e9508e-d706-4b9e-9d40-91535eb221c6-operator-scripts\") pod \"octavia-persistence-db-create-mvwwh\" (UID: \"38e9508e-d706-4b9e-9d40-91535eb221c6\") " 
pod="openstack/octavia-persistence-db-create-mvwwh" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.937337 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38e9508e-d706-4b9e-9d40-91535eb221c6-operator-scripts\") pod \"octavia-persistence-db-create-mvwwh\" (UID: \"38e9508e-d706-4b9e-9d40-91535eb221c6\") " pod="openstack/octavia-persistence-db-create-mvwwh" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.954501 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrtk5\" (UniqueName: \"kubernetes.io/projected/38e9508e-d706-4b9e-9d40-91535eb221c6-kube-api-access-vrtk5\") pod \"octavia-persistence-db-create-mvwwh\" (UID: \"38e9508e-d706-4b9e-9d40-91535eb221c6\") " pod="openstack/octavia-persistence-db-create-mvwwh" Jan 29 08:14:25 crc kubenswrapper[4861]: I0129 08:14:25.959360 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-mvwwh" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.353946 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-047c-account-create-update-z9kr2"] Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.356169 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-047c-account-create-update-z9kr2" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.359390 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-persistence-db-secret" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.363144 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-047c-account-create-update-z9kr2"] Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.419498 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-mvwwh"] Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.449316 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ecefac8f-faec-489d-9c82-62e0ccd17d16-operator-scripts\") pod \"octavia-047c-account-create-update-z9kr2\" (UID: \"ecefac8f-faec-489d-9c82-62e0ccd17d16\") " pod="openstack/octavia-047c-account-create-update-z9kr2" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.449366 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8dvq\" (UniqueName: \"kubernetes.io/projected/ecefac8f-faec-489d-9c82-62e0ccd17d16-kube-api-access-s8dvq\") pod \"octavia-047c-account-create-update-z9kr2\" (UID: \"ecefac8f-faec-489d-9c82-62e0ccd17d16\") " pod="openstack/octavia-047c-account-create-update-z9kr2" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.551663 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ecefac8f-faec-489d-9c82-62e0ccd17d16-operator-scripts\") pod \"octavia-047c-account-create-update-z9kr2\" (UID: \"ecefac8f-faec-489d-9c82-62e0ccd17d16\") " pod="openstack/octavia-047c-account-create-update-z9kr2" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.551996 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8dvq\" (UniqueName: \"kubernetes.io/projected/ecefac8f-faec-489d-9c82-62e0ccd17d16-kube-api-access-s8dvq\") pod 
\"octavia-047c-account-create-update-z9kr2\" (UID: \"ecefac8f-faec-489d-9c82-62e0ccd17d16\") " pod="openstack/octavia-047c-account-create-update-z9kr2" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.553917 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ecefac8f-faec-489d-9c82-62e0ccd17d16-operator-scripts\") pod \"octavia-047c-account-create-update-z9kr2\" (UID: \"ecefac8f-faec-489d-9c82-62e0ccd17d16\") " pod="openstack/octavia-047c-account-create-update-z9kr2" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.589646 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8dvq\" (UniqueName: \"kubernetes.io/projected/ecefac8f-faec-489d-9c82-62e0ccd17d16-kube-api-access-s8dvq\") pod \"octavia-047c-account-create-update-z9kr2\" (UID: \"ecefac8f-faec-489d-9c82-62e0ccd17d16\") " pod="openstack/octavia-047c-account-create-update-z9kr2" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.688572 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-047c-account-create-update-z9kr2" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.790685 4861 scope.go:117] "RemoveContainer" containerID="f35ec5af27d2d883bceee71bf96b4729804a12e136fbc7a704c3a7825ea81508" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.840520 4861 scope.go:117] "RemoveContainer" containerID="fb593a16c90450a1199b5f756278ac73394c2f844d143205139cd3dd8d01ed99" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.901612 4861 scope.go:117] "RemoveContainer" containerID="058a0f70dd93d722bfef6f49e08a2bc112d16bde8851b7461552897dc4e195fa" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.920629 4861 scope.go:117] "RemoveContainer" containerID="ca66f7168cdb049ceb8f4ca4419850e3f8f81328c1406ee864e6eee211a0ab05" Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.945145 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-mvwwh" event={"ID":"38e9508e-d706-4b9e-9d40-91535eb221c6","Type":"ContainerStarted","Data":"d0420f79ccbf297db6d8885e86b532dca6e8f22c5019900c56af46fd31995c71"} Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.945193 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-mvwwh" event={"ID":"38e9508e-d706-4b9e-9d40-91535eb221c6","Type":"ContainerStarted","Data":"677142be123febe17ff3d69b87db66f61483a055b02f45ad9adb13b12a5de4cd"} Jan 29 08:14:26 crc kubenswrapper[4861]: I0129 08:14:26.973697 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-persistence-db-create-mvwwh" podStartSLOduration=1.973678252 podStartE2EDuration="1.973678252s" podCreationTimestamp="2026-01-29 08:14:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:14:26.967975502 +0000 UTC m=+5958.639470059" watchObservedRunningTime="2026-01-29 08:14:26.973678252 +0000 UTC m=+5958.645172799" Jan 29 08:14:27 crc kubenswrapper[4861]: W0129 08:14:27.203977 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podecefac8f_faec_489d_9c82_62e0ccd17d16.slice/crio-bb5a244d1aaa54f713d14bdd73803ac9ac306302d50f2a7903476deb5e36971a WatchSource:0}: Error finding container bb5a244d1aaa54f713d14bdd73803ac9ac306302d50f2a7903476deb5e36971a: Status 404 returned 
error can't find the container with id bb5a244d1aaa54f713d14bdd73803ac9ac306302d50f2a7903476deb5e36971a Jan 29 08:14:27 crc kubenswrapper[4861]: I0129 08:14:27.213924 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-047c-account-create-update-z9kr2"] Jan 29 08:14:27 crc kubenswrapper[4861]: I0129 08:14:27.965363 4861 generic.go:334] "Generic (PLEG): container finished" podID="38e9508e-d706-4b9e-9d40-91535eb221c6" containerID="d0420f79ccbf297db6d8885e86b532dca6e8f22c5019900c56af46fd31995c71" exitCode=0 Jan 29 08:14:27 crc kubenswrapper[4861]: I0129 08:14:27.965490 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-mvwwh" event={"ID":"38e9508e-d706-4b9e-9d40-91535eb221c6","Type":"ContainerDied","Data":"d0420f79ccbf297db6d8885e86b532dca6e8f22c5019900c56af46fd31995c71"} Jan 29 08:14:27 crc kubenswrapper[4861]: I0129 08:14:27.971288 4861 generic.go:334] "Generic (PLEG): container finished" podID="ecefac8f-faec-489d-9c82-62e0ccd17d16" containerID="d17087f7c4bd3135816851700890f8eda3ad7bda848c1b8f20e450722dd796ee" exitCode=0 Jan 29 08:14:27 crc kubenswrapper[4861]: I0129 08:14:27.971359 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-047c-account-create-update-z9kr2" event={"ID":"ecefac8f-faec-489d-9c82-62e0ccd17d16","Type":"ContainerDied","Data":"d17087f7c4bd3135816851700890f8eda3ad7bda848c1b8f20e450722dd796ee"} Jan 29 08:14:27 crc kubenswrapper[4861]: I0129 08:14:27.971424 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-047c-account-create-update-z9kr2" event={"ID":"ecefac8f-faec-489d-9c82-62e0ccd17d16","Type":"ContainerStarted","Data":"bb5a244d1aaa54f713d14bdd73803ac9ac306302d50f2a7903476deb5e36971a"} Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.453149 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-047c-account-create-update-z9kr2" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.556271 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-mvwwh" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.617397 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8dvq\" (UniqueName: \"kubernetes.io/projected/ecefac8f-faec-489d-9c82-62e0ccd17d16-kube-api-access-s8dvq\") pod \"ecefac8f-faec-489d-9c82-62e0ccd17d16\" (UID: \"ecefac8f-faec-489d-9c82-62e0ccd17d16\") " Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.617564 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ecefac8f-faec-489d-9c82-62e0ccd17d16-operator-scripts\") pod \"ecefac8f-faec-489d-9c82-62e0ccd17d16\" (UID: \"ecefac8f-faec-489d-9c82-62e0ccd17d16\") " Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.618292 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecefac8f-faec-489d-9c82-62e0ccd17d16-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ecefac8f-faec-489d-9c82-62e0ccd17d16" (UID: "ecefac8f-faec-489d-9c82-62e0ccd17d16"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.622423 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecefac8f-faec-489d-9c82-62e0ccd17d16-kube-api-access-s8dvq" (OuterVolumeSpecName: "kube-api-access-s8dvq") pod "ecefac8f-faec-489d-9c82-62e0ccd17d16" (UID: "ecefac8f-faec-489d-9c82-62e0ccd17d16"). InnerVolumeSpecName "kube-api-access-s8dvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.719685 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38e9508e-d706-4b9e-9d40-91535eb221c6-operator-scripts\") pod \"38e9508e-d706-4b9e-9d40-91535eb221c6\" (UID: \"38e9508e-d706-4b9e-9d40-91535eb221c6\") " Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.719874 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrtk5\" (UniqueName: \"kubernetes.io/projected/38e9508e-d706-4b9e-9d40-91535eb221c6-kube-api-access-vrtk5\") pod \"38e9508e-d706-4b9e-9d40-91535eb221c6\" (UID: \"38e9508e-d706-4b9e-9d40-91535eb221c6\") " Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.720455 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8dvq\" (UniqueName: \"kubernetes.io/projected/ecefac8f-faec-489d-9c82-62e0ccd17d16-kube-api-access-s8dvq\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.720477 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ecefac8f-faec-489d-9c82-62e0ccd17d16-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.721222 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38e9508e-d706-4b9e-9d40-91535eb221c6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "38e9508e-d706-4b9e-9d40-91535eb221c6" (UID: "38e9508e-d706-4b9e-9d40-91535eb221c6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.722997 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38e9508e-d706-4b9e-9d40-91535eb221c6-kube-api-access-vrtk5" (OuterVolumeSpecName: "kube-api-access-vrtk5") pod "38e9508e-d706-4b9e-9d40-91535eb221c6" (UID: "38e9508e-d706-4b9e-9d40-91535eb221c6"). InnerVolumeSpecName "kube-api-access-vrtk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.822803 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38e9508e-d706-4b9e-9d40-91535eb221c6-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.822851 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrtk5\" (UniqueName: \"kubernetes.io/projected/38e9508e-d706-4b9e-9d40-91535eb221c6-kube-api-access-vrtk5\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.995378 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-mvwwh" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.995370 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-mvwwh" event={"ID":"38e9508e-d706-4b9e-9d40-91535eb221c6","Type":"ContainerDied","Data":"677142be123febe17ff3d69b87db66f61483a055b02f45ad9adb13b12a5de4cd"} Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.995540 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="677142be123febe17ff3d69b87db66f61483a055b02f45ad9adb13b12a5de4cd" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.999368 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-047c-account-create-update-z9kr2" event={"ID":"ecefac8f-faec-489d-9c82-62e0ccd17d16","Type":"ContainerDied","Data":"bb5a244d1aaa54f713d14bdd73803ac9ac306302d50f2a7903476deb5e36971a"} Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.999419 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb5a244d1aaa54f713d14bdd73803ac9ac306302d50f2a7903476deb5e36971a" Jan 29 08:14:29 crc kubenswrapper[4861]: I0129 08:14:29.999471 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-047c-account-create-update-z9kr2" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.767680 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-cd49fd867-jr7th"] Jan 29 08:14:31 crc kubenswrapper[4861]: E0129 08:14:31.768505 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e9508e-d706-4b9e-9d40-91535eb221c6" containerName="mariadb-database-create" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.768525 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e9508e-d706-4b9e-9d40-91535eb221c6" containerName="mariadb-database-create" Jan 29 08:14:31 crc kubenswrapper[4861]: E0129 08:14:31.768569 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecefac8f-faec-489d-9c82-62e0ccd17d16" containerName="mariadb-account-create-update" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.768577 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecefac8f-faec-489d-9c82-62e0ccd17d16" containerName="mariadb-account-create-update" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.768811 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecefac8f-faec-489d-9c82-62e0ccd17d16" containerName="mariadb-account-create-update" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.768827 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="38e9508e-d706-4b9e-9d40-91535eb221c6" containerName="mariadb-database-create" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.770497 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.773650 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-ovndbs" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.774237 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-octavia-dockercfg-r67ml" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.774456 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-config-data" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.774694 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-scripts" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.789489 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-cd49fd867-jr7th"] Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.968669 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-ovndb-tls-certs\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.968801 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-octavia-run\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.968825 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-scripts\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.968886 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data-merged\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.968928 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-combined-ca-bundle\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:31 crc kubenswrapper[4861]: I0129 08:14:31.968948 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.072343 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-scripts\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.072379 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-octavia-run\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.073036 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-octavia-run\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.073325 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data-merged\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.073377 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-combined-ca-bundle\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.073421 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.073529 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-ovndb-tls-certs\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.074008 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data-merged\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.079139 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-combined-ca-bundle\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.079239 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data\") pod \"octavia-api-cd49fd867-jr7th\" 
(UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.079808 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-scripts\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.080319 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-ovndb-tls-certs\") pod \"octavia-api-cd49fd867-jr7th\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.098949 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:32 crc kubenswrapper[4861]: I0129 08:14:32.593420 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-cd49fd867-jr7th"] Jan 29 08:14:33 crc kubenswrapper[4861]: I0129 08:14:33.025279 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-cd49fd867-jr7th" event={"ID":"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89","Type":"ContainerStarted","Data":"f89ab9e9b6b15a6a73269a95d9fb905567fd20927b169ac14634cf95ddd6e3de"} Jan 29 08:14:35 crc kubenswrapper[4861]: I0129 08:14:35.116600 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:14:35 crc kubenswrapper[4861]: E0129 08:14:35.117286 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.337555 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-s62c5" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.345383 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.350716 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-kqmjx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.438466 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-s62c5-config-prkdx"] Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.440138 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.442891 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.454223 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s62c5-config-prkdx"] Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.583665 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-log-ovn\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.583736 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run-ovn\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.583769 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-additional-scripts\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.583811 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-scripts\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.583835 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqt4h\" (UniqueName: \"kubernetes.io/projected/86b616bd-1d8b-4bd9-8453-dcede13eab51-kube-api-access-qqt4h\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.583868 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.686631 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.687013 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run\") pod 
\"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.687140 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-log-ovn\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.687244 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run-ovn\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.687344 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-additional-scripts\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.687443 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-scripts\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.687528 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqt4h\" (UniqueName: \"kubernetes.io/projected/86b616bd-1d8b-4bd9-8453-dcede13eab51-kube-api-access-qqt4h\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.687247 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-log-ovn\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.687281 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run-ovn\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.688815 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-additional-scripts\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.690440 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-scripts\") pod 
\"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.711123 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqt4h\" (UniqueName: \"kubernetes.io/projected/86b616bd-1d8b-4bd9-8453-dcede13eab51-kube-api-access-qqt4h\") pod \"ovn-controller-s62c5-config-prkdx\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:36 crc kubenswrapper[4861]: I0129 08:14:36.759793 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:42 crc kubenswrapper[4861]: I0129 08:14:42.754178 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s62c5-config-prkdx"] Jan 29 08:14:43 crc kubenswrapper[4861]: I0129 08:14:43.134616 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5-config-prkdx" event={"ID":"86b616bd-1d8b-4bd9-8453-dcede13eab51","Type":"ContainerStarted","Data":"f2e3d1cea217dc1a6c7a3acd9d45d94df20482ac02b980207b00a34bd59b039d"} Jan 29 08:14:43 crc kubenswrapper[4861]: I0129 08:14:43.134926 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5-config-prkdx" event={"ID":"86b616bd-1d8b-4bd9-8453-dcede13eab51","Type":"ContainerStarted","Data":"b901c64918189401be40b7f282514a4fbefef5f6b2bd5e8925028058ac21d1e6"} Jan 29 08:14:43 crc kubenswrapper[4861]: I0129 08:14:43.143615 4861 generic.go:334] "Generic (PLEG): container finished" podID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerID="affd3286942ce39d651b34c9aa87a34e6c12197b4e87acc27070237b5f209c5d" exitCode=0 Jan 29 08:14:43 crc kubenswrapper[4861]: I0129 08:14:43.143871 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-cd49fd867-jr7th" event={"ID":"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89","Type":"ContainerDied","Data":"affd3286942ce39d651b34c9aa87a34e6c12197b4e87acc27070237b5f209c5d"} Jan 29 08:14:43 crc kubenswrapper[4861]: I0129 08:14:43.164507 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-s62c5-config-prkdx" podStartSLOduration=7.164488843 podStartE2EDuration="7.164488843s" podCreationTimestamp="2026-01-29 08:14:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:14:43.159177623 +0000 UTC m=+5974.830672190" watchObservedRunningTime="2026-01-29 08:14:43.164488843 +0000 UTC m=+5974.835983400" Jan 29 08:14:44 crc kubenswrapper[4861]: I0129 08:14:44.156221 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-cd49fd867-jr7th" event={"ID":"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89","Type":"ContainerStarted","Data":"e8092efd3846afded23206571f13cd3fa241baf304315972a0f3b9e1412df5fb"} Jan 29 08:14:44 crc kubenswrapper[4861]: I0129 08:14:44.156519 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-cd49fd867-jr7th" event={"ID":"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89","Type":"ContainerStarted","Data":"dd5edcf1b954de97d3719158099bf8a02e64eae4e4f3f8935eb40426a71882b5"} Jan 29 08:14:44 crc kubenswrapper[4861]: I0129 08:14:44.156717 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:44 crc kubenswrapper[4861]: I0129 08:14:44.156761 4861 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:14:44 crc kubenswrapper[4861]: I0129 08:14:44.158033 4861 generic.go:334] "Generic (PLEG): container finished" podID="86b616bd-1d8b-4bd9-8453-dcede13eab51" containerID="f2e3d1cea217dc1a6c7a3acd9d45d94df20482ac02b980207b00a34bd59b039d" exitCode=0 Jan 29 08:14:44 crc kubenswrapper[4861]: I0129 08:14:44.158099 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5-config-prkdx" event={"ID":"86b616bd-1d8b-4bd9-8453-dcede13eab51","Type":"ContainerDied","Data":"f2e3d1cea217dc1a6c7a3acd9d45d94df20482ac02b980207b00a34bd59b039d"} Jan 29 08:14:44 crc kubenswrapper[4861]: I0129 08:14:44.183266 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-cd49fd867-jr7th" podStartSLOduration=3.49063947 podStartE2EDuration="13.183241694s" podCreationTimestamp="2026-01-29 08:14:31 +0000 UTC" firstStartedPulling="2026-01-29 08:14:32.608941174 +0000 UTC m=+5964.280435731" lastFinishedPulling="2026-01-29 08:14:42.301543388 +0000 UTC m=+5973.973037955" observedRunningTime="2026-01-29 08:14:44.173787735 +0000 UTC m=+5975.845282292" watchObservedRunningTime="2026-01-29 08:14:44.183241694 +0000 UTC m=+5975.854736251" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.555410 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.681877 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-scripts\") pod \"86b616bd-1d8b-4bd9-8453-dcede13eab51\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.682280 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run\") pod \"86b616bd-1d8b-4bd9-8453-dcede13eab51\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.682315 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run-ovn\") pod \"86b616bd-1d8b-4bd9-8453-dcede13eab51\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.682396 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqt4h\" (UniqueName: \"kubernetes.io/projected/86b616bd-1d8b-4bd9-8453-dcede13eab51-kube-api-access-qqt4h\") pod \"86b616bd-1d8b-4bd9-8453-dcede13eab51\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.682427 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run" (OuterVolumeSpecName: "var-run") pod "86b616bd-1d8b-4bd9-8453-dcede13eab51" (UID: "86b616bd-1d8b-4bd9-8453-dcede13eab51"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.682455 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "86b616bd-1d8b-4bd9-8453-dcede13eab51" (UID: "86b616bd-1d8b-4bd9-8453-dcede13eab51"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.682482 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-log-ovn\") pod \"86b616bd-1d8b-4bd9-8453-dcede13eab51\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.682618 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "86b616bd-1d8b-4bd9-8453-dcede13eab51" (UID: "86b616bd-1d8b-4bd9-8453-dcede13eab51"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.682688 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-additional-scripts\") pod \"86b616bd-1d8b-4bd9-8453-dcede13eab51\" (UID: \"86b616bd-1d8b-4bd9-8453-dcede13eab51\") " Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.683160 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "86b616bd-1d8b-4bd9-8453-dcede13eab51" (UID: "86b616bd-1d8b-4bd9-8453-dcede13eab51"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.683184 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-scripts" (OuterVolumeSpecName: "scripts") pod "86b616bd-1d8b-4bd9-8453-dcede13eab51" (UID: "86b616bd-1d8b-4bd9-8453-dcede13eab51"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.683679 4861 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.683694 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86b616bd-1d8b-4bd9-8453-dcede13eab51-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.683703 4861 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.683711 4861 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.683720 4861 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/86b616bd-1d8b-4bd9-8453-dcede13eab51-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.689290 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86b616bd-1d8b-4bd9-8453-dcede13eab51-kube-api-access-qqt4h" (OuterVolumeSpecName: "kube-api-access-qqt4h") pod "86b616bd-1d8b-4bd9-8453-dcede13eab51" (UID: "86b616bd-1d8b-4bd9-8453-dcede13eab51"). InnerVolumeSpecName "kube-api-access-qqt4h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.785676 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqt4h\" (UniqueName: \"kubernetes.io/projected/86b616bd-1d8b-4bd9-8453-dcede13eab51-kube-api-access-qqt4h\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.818911 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-s62c5-config-prkdx"] Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.826488 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-s62c5-config-prkdx"] Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.863869 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-s62c5-config-cfr5v"] Jan 29 08:14:45 crc kubenswrapper[4861]: E0129 08:14:45.864514 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86b616bd-1d8b-4bd9-8453-dcede13eab51" containerName="ovn-config" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.864544 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="86b616bd-1d8b-4bd9-8453-dcede13eab51" containerName="ovn-config" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.864864 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="86b616bd-1d8b-4bd9-8453-dcede13eab51" containerName="ovn-config" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.865936 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.874848 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s62c5-config-cfr5v"] Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.989080 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-additional-scripts\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.989177 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-log-ovn\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.989252 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run-ovn\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.989274 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xrxq\" (UniqueName: \"kubernetes.io/projected/78bc456c-ac0b-4bd5-9d92-9882d553fe43-kube-api-access-9xrxq\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.989295 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-scripts\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:45 crc kubenswrapper[4861]: I0129 08:14:45.989333 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.091090 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-additional-scripts\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.091491 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-log-ovn\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc 
kubenswrapper[4861]: I0129 08:14:46.091688 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run-ovn\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.091793 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xrxq\" (UniqueName: \"kubernetes.io/projected/78bc456c-ac0b-4bd5-9d92-9882d553fe43-kube-api-access-9xrxq\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.091907 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-scripts\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.091985 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run-ovn\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.091914 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-additional-scripts\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.092235 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.092425 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.092534 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-log-ovn\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.093898 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-scripts\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.109060 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xrxq\" (UniqueName: \"kubernetes.io/projected/78bc456c-ac0b-4bd5-9d92-9882d553fe43-kube-api-access-9xrxq\") pod \"ovn-controller-s62c5-config-cfr5v\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.182313 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b901c64918189401be40b7f282514a4fbefef5f6b2bd5e8925028058ac21d1e6" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.182371 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s62c5-config-prkdx" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.183742 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:46 crc kubenswrapper[4861]: I0129 08:14:46.706098 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s62c5-config-cfr5v"] Jan 29 08:14:47 crc kubenswrapper[4861]: I0129 08:14:47.116818 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:14:47 crc kubenswrapper[4861]: E0129 08:14:47.117563 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:14:47 crc kubenswrapper[4861]: I0129 08:14:47.126130 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86b616bd-1d8b-4bd9-8453-dcede13eab51" path="/var/lib/kubelet/pods/86b616bd-1d8b-4bd9-8453-dcede13eab51/volumes" Jan 29 08:14:47 crc kubenswrapper[4861]: I0129 08:14:47.215603 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5-config-cfr5v" event={"ID":"78bc456c-ac0b-4bd5-9d92-9882d553fe43","Type":"ContainerStarted","Data":"5d3f504535707a2504a5472775f3ab594598e93fece93a6ad7f2384b9bf6e7c4"} Jan 29 08:14:47 crc kubenswrapper[4861]: I0129 08:14:47.215815 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5-config-cfr5v" event={"ID":"78bc456c-ac0b-4bd5-9d92-9882d553fe43","Type":"ContainerStarted","Data":"93f42aad56e5969456b2a8870af60b28076940688031c2d9eb84a097f5a9538b"} Jan 29 08:14:47 crc kubenswrapper[4861]: I0129 08:14:47.248224 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-s62c5-config-cfr5v" podStartSLOduration=2.24820435 podStartE2EDuration="2.24820435s" podCreationTimestamp="2026-01-29 08:14:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:14:47.240675792 +0000 UTC m=+5978.912170349" watchObservedRunningTime="2026-01-29 08:14:47.24820435 +0000 UTC m=+5978.919698907" Jan 29 08:14:48 crc kubenswrapper[4861]: I0129 08:14:48.228687 4861 generic.go:334] "Generic (PLEG): container finished" podID="78bc456c-ac0b-4bd5-9d92-9882d553fe43" containerID="5d3f504535707a2504a5472775f3ab594598e93fece93a6ad7f2384b9bf6e7c4" exitCode=0 Jan 29 08:14:48 crc kubenswrapper[4861]: I0129 
08:14:48.228875 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5-config-cfr5v" event={"ID":"78bc456c-ac0b-4bd5-9d92-9882d553fe43","Type":"ContainerDied","Data":"5d3f504535707a2504a5472775f3ab594598e93fece93a6ad7f2384b9bf6e7c4"} Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.684875 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.794688 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-additional-scripts\") pod \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.794787 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xrxq\" (UniqueName: \"kubernetes.io/projected/78bc456c-ac0b-4bd5-9d92-9882d553fe43-kube-api-access-9xrxq\") pod \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.794860 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run\") pod \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.794883 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-log-ovn\") pod \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.794922 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-scripts\") pod \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.795045 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run-ovn\") pod \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\" (UID: \"78bc456c-ac0b-4bd5-9d92-9882d553fe43\") " Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.795151 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "78bc456c-ac0b-4bd5-9d92-9882d553fe43" (UID: "78bc456c-ac0b-4bd5-9d92-9882d553fe43"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.795191 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run" (OuterVolumeSpecName: "var-run") pod "78bc456c-ac0b-4bd5-9d92-9882d553fe43" (UID: "78bc456c-ac0b-4bd5-9d92-9882d553fe43"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.795219 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "78bc456c-ac0b-4bd5-9d92-9882d553fe43" (UID: "78bc456c-ac0b-4bd5-9d92-9882d553fe43"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.795616 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "78bc456c-ac0b-4bd5-9d92-9882d553fe43" (UID: "78bc456c-ac0b-4bd5-9d92-9882d553fe43"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.795843 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-scripts" (OuterVolumeSpecName: "scripts") pod "78bc456c-ac0b-4bd5-9d92-9882d553fe43" (UID: "78bc456c-ac0b-4bd5-9d92-9882d553fe43"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.796373 4861 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.796394 4861 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.796405 4861 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.796414 4861 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78bc456c-ac0b-4bd5-9d92-9882d553fe43-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.796422 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78bc456c-ac0b-4bd5-9d92-9882d553fe43-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.800442 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78bc456c-ac0b-4bd5-9d92-9882d553fe43-kube-api-access-9xrxq" (OuterVolumeSpecName: "kube-api-access-9xrxq") pod "78bc456c-ac0b-4bd5-9d92-9882d553fe43" (UID: "78bc456c-ac0b-4bd5-9d92-9882d553fe43"). InnerVolumeSpecName "kube-api-access-9xrxq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:14:49 crc kubenswrapper[4861]: I0129 08:14:49.897907 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xrxq\" (UniqueName: \"kubernetes.io/projected/78bc456c-ac0b-4bd5-9d92-9882d553fe43-kube-api-access-9xrxq\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.253489 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5-config-cfr5v" event={"ID":"78bc456c-ac0b-4bd5-9d92-9882d553fe43","Type":"ContainerDied","Data":"93f42aad56e5969456b2a8870af60b28076940688031c2d9eb84a097f5a9538b"} Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.253551 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93f42aad56e5969456b2a8870af60b28076940688031c2d9eb84a097f5a9538b" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.253639 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s62c5-config-cfr5v" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.369501 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-s62c5-config-cfr5v"] Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.378410 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-s62c5-config-cfr5v"] Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.454461 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-s62c5-config-xz9kq"] Jan 29 08:14:50 crc kubenswrapper[4861]: E0129 08:14:50.455680 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78bc456c-ac0b-4bd5-9d92-9882d553fe43" containerName="ovn-config" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.455702 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="78bc456c-ac0b-4bd5-9d92-9882d553fe43" containerName="ovn-config" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.455913 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="78bc456c-ac0b-4bd5-9d92-9882d553fe43" containerName="ovn-config" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.456733 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.458741 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.470293 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s62c5-config-xz9kq"] Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.509477 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-scripts\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.509543 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run-ovn\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.509603 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.509634 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlhb9\" (UniqueName: \"kubernetes.io/projected/65cd72da-e3cd-4d61-a9bf-1013da526649-kube-api-access-mlhb9\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.509667 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-log-ovn\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.509694 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-additional-scripts\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.610924 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlhb9\" (UniqueName: \"kubernetes.io/projected/65cd72da-e3cd-4d61-a9bf-1013da526649-kube-api-access-mlhb9\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.610995 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-log-ovn\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.611031 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-additional-scripts\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.611119 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-scripts\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.611180 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run-ovn\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.611247 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.611373 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.611373 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-log-ovn\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.611437 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run-ovn\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.612370 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-additional-scripts\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.613748 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-scripts\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.657051 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlhb9\" (UniqueName: \"kubernetes.io/projected/65cd72da-e3cd-4d61-a9bf-1013da526649-kube-api-access-mlhb9\") pod \"ovn-controller-s62c5-config-xz9kq\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:50 crc kubenswrapper[4861]: I0129 08:14:50.785417 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:51 crc kubenswrapper[4861]: I0129 08:14:51.129373 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78bc456c-ac0b-4bd5-9d92-9882d553fe43" path="/var/lib/kubelet/pods/78bc456c-ac0b-4bd5-9d92-9882d553fe43/volumes" Jan 29 08:14:51 crc kubenswrapper[4861]: I0129 08:14:51.276637 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-s62c5-config-xz9kq"] Jan 29 08:14:52 crc kubenswrapper[4861]: I0129 08:14:52.272251 4861 generic.go:334] "Generic (PLEG): container finished" podID="65cd72da-e3cd-4d61-a9bf-1013da526649" containerID="d1b4b2579f93de80c6fb7542c2df049f4c23d233ffdb9f97239370ee5449582e" exitCode=0 Jan 29 08:14:52 crc kubenswrapper[4861]: I0129 08:14:52.272353 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5-config-xz9kq" event={"ID":"65cd72da-e3cd-4d61-a9bf-1013da526649","Type":"ContainerDied","Data":"d1b4b2579f93de80c6fb7542c2df049f4c23d233ffdb9f97239370ee5449582e"} Jan 29 08:14:52 crc kubenswrapper[4861]: I0129 08:14:52.272568 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5-config-xz9kq" event={"ID":"65cd72da-e3cd-4d61-a9bf-1013da526649","Type":"ContainerStarted","Data":"219e30e1e4c62f0ab98203876d3e0689915ae7d9b4f08f4902af9df2ab458fc5"} Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.705700 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.766104 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-log-ovn\") pod \"65cd72da-e3cd-4d61-a9bf-1013da526649\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.766165 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run\") pod \"65cd72da-e3cd-4d61-a9bf-1013da526649\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.766287 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run-ovn\") pod \"65cd72da-e3cd-4d61-a9bf-1013da526649\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.766293 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run" (OuterVolumeSpecName: "var-run") pod "65cd72da-e3cd-4d61-a9bf-1013da526649" (UID: "65cd72da-e3cd-4d61-a9bf-1013da526649"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.766476 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlhb9\" (UniqueName: \"kubernetes.io/projected/65cd72da-e3cd-4d61-a9bf-1013da526649-kube-api-access-mlhb9\") pod \"65cd72da-e3cd-4d61-a9bf-1013da526649\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.766340 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "65cd72da-e3cd-4d61-a9bf-1013da526649" (UID: "65cd72da-e3cd-4d61-a9bf-1013da526649"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.766426 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "65cd72da-e3cd-4d61-a9bf-1013da526649" (UID: "65cd72da-e3cd-4d61-a9bf-1013da526649"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.766574 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-additional-scripts\") pod \"65cd72da-e3cd-4d61-a9bf-1013da526649\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.766652 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-scripts\") pod \"65cd72da-e3cd-4d61-a9bf-1013da526649\" (UID: \"65cd72da-e3cd-4d61-a9bf-1013da526649\") " Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.767325 4861 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.767342 4861 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.767352 4861 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/65cd72da-e3cd-4d61-a9bf-1013da526649-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.767491 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "65cd72da-e3cd-4d61-a9bf-1013da526649" (UID: "65cd72da-e3cd-4d61-a9bf-1013da526649"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.767931 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-scripts" (OuterVolumeSpecName: "scripts") pod "65cd72da-e3cd-4d61-a9bf-1013da526649" (UID: "65cd72da-e3cd-4d61-a9bf-1013da526649"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.784348 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65cd72da-e3cd-4d61-a9bf-1013da526649-kube-api-access-mlhb9" (OuterVolumeSpecName: "kube-api-access-mlhb9") pod "65cd72da-e3cd-4d61-a9bf-1013da526649" (UID: "65cd72da-e3cd-4d61-a9bf-1013da526649"). InnerVolumeSpecName "kube-api-access-mlhb9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.869556 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlhb9\" (UniqueName: \"kubernetes.io/projected/65cd72da-e3cd-4d61-a9bf-1013da526649-kube-api-access-mlhb9\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.869910 4861 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:53 crc kubenswrapper[4861]: I0129 08:14:53.869925 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/65cd72da-e3cd-4d61-a9bf-1013da526649-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:14:54 crc kubenswrapper[4861]: I0129 08:14:54.297679 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-s62c5-config-xz9kq" event={"ID":"65cd72da-e3cd-4d61-a9bf-1013da526649","Type":"ContainerDied","Data":"219e30e1e4c62f0ab98203876d3e0689915ae7d9b4f08f4902af9df2ab458fc5"} Jan 29 08:14:54 crc kubenswrapper[4861]: I0129 08:14:54.297741 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="219e30e1e4c62f0ab98203876d3e0689915ae7d9b4f08f4902af9df2ab458fc5" Jan 29 08:14:54 crc kubenswrapper[4861]: I0129 08:14:54.297818 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-s62c5-config-xz9kq" Jan 29 08:14:54 crc kubenswrapper[4861]: I0129 08:14:54.825993 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-s62c5-config-xz9kq"] Jan 29 08:14:54 crc kubenswrapper[4861]: I0129 08:14:54.843463 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-s62c5-config-xz9kq"] Jan 29 08:14:55 crc kubenswrapper[4861]: I0129 08:14:55.133828 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65cd72da-e3cd-4d61-a9bf-1013da526649" path="/var/lib/kubelet/pods/65cd72da-e3cd-4d61-a9bf-1013da526649/volumes" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.146158 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w"] Jan 29 08:15:00 crc kubenswrapper[4861]: E0129 08:15:00.146988 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65cd72da-e3cd-4d61-a9bf-1013da526649" containerName="ovn-config" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.147000 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="65cd72da-e3cd-4d61-a9bf-1013da526649" containerName="ovn-config" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.147230 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="65cd72da-e3cd-4d61-a9bf-1013da526649" containerName="ovn-config" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.147861 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.152091 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.152692 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.169189 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w"] Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.317603 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t628\" (UniqueName: \"kubernetes.io/projected/58031ad0-9a82-4948-a2dd-cf318360285f-kube-api-access-8t628\") pod \"collect-profiles-29494575-ftg8w\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.317728 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58031ad0-9a82-4948-a2dd-cf318360285f-secret-volume\") pod \"collect-profiles-29494575-ftg8w\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.318041 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58031ad0-9a82-4948-a2dd-cf318360285f-config-volume\") pod \"collect-profiles-29494575-ftg8w\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.419703 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58031ad0-9a82-4948-a2dd-cf318360285f-secret-volume\") pod \"collect-profiles-29494575-ftg8w\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.419795 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58031ad0-9a82-4948-a2dd-cf318360285f-config-volume\") pod \"collect-profiles-29494575-ftg8w\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.419947 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t628\" (UniqueName: \"kubernetes.io/projected/58031ad0-9a82-4948-a2dd-cf318360285f-kube-api-access-8t628\") pod \"collect-profiles-29494575-ftg8w\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.421187 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58031ad0-9a82-4948-a2dd-cf318360285f-config-volume\") pod 
\"collect-profiles-29494575-ftg8w\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.444531 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58031ad0-9a82-4948-a2dd-cf318360285f-secret-volume\") pod \"collect-profiles-29494575-ftg8w\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.451276 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8t628\" (UniqueName: \"kubernetes.io/projected/58031ad0-9a82-4948-a2dd-cf318360285f-kube-api-access-8t628\") pod \"collect-profiles-29494575-ftg8w\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.478101 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:00 crc kubenswrapper[4861]: I0129 08:15:00.972193 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w"] Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.116199 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.363584 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" event={"ID":"58031ad0-9a82-4948-a2dd-cf318360285f","Type":"ContainerStarted","Data":"51e52a5d8b34952c802563cbe111c7a8e57b4c8a82f4fb7d1869a577154fda05"} Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.363635 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" event={"ID":"58031ad0-9a82-4948-a2dd-cf318360285f","Type":"ContainerStarted","Data":"035e5d991de2f837adf327986ccff2ea40e2da10bb13f76bf4b99b045153132e"} Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.371283 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"fcca75648caaf02940cb3f4b6284809f6ed607019ae9564066e870fde1b501ad"} Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.387582 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" podStartSLOduration=1.387560218 podStartE2EDuration="1.387560218s" podCreationTimestamp="2026-01-29 08:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:15:01.377333239 +0000 UTC m=+5993.048827816" watchObservedRunningTime="2026-01-29 08:15:01.387560218 +0000 UTC m=+5993.059054785" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.585263 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-rsyslog-7b4hh"] Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.586937 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.588912 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-config-data" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.589240 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"octavia-hmport-map" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.589386 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-scripts" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.648088 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-7b4hh"] Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.754756 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-config-data\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.754862 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-scripts\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.754929 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-config-data-merged\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.754959 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-hm-ports\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.856859 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-hm-ports\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.857053 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-config-data\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.857137 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-scripts\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.857183 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: 
\"kubernetes.io/empty-dir/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-config-data-merged\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.857616 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-config-data-merged\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.857797 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-hm-ports\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.863562 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-config-data\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.879798 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f06299f-473a-4b1e-8c5c-8b6eae0a27d8-scripts\") pod \"octavia-rsyslog-7b4hh\" (UID: \"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8\") " pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:01 crc kubenswrapper[4861]: I0129 08:15:01.937525 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.355059 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-65dd99cb46-gzv86"] Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.357245 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.360706 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.374685 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-65dd99cb46-gzv86"] Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.407708 4861 generic.go:334] "Generic (PLEG): container finished" podID="58031ad0-9a82-4948-a2dd-cf318360285f" containerID="51e52a5d8b34952c802563cbe111c7a8e57b4c8a82f4fb7d1869a577154fda05" exitCode=0 Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.407776 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" event={"ID":"58031ad0-9a82-4948-a2dd-cf318360285f","Type":"ContainerDied","Data":"51e52a5d8b34952c802563cbe111c7a8e57b4c8a82f4fb7d1869a577154fda05"} Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.482297 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/b516abb5-d52a-4fc7-9169-178bb103a5b4-amphora-image\") pod \"octavia-image-upload-65dd99cb46-gzv86\" (UID: \"b516abb5-d52a-4fc7-9169-178bb103a5b4\") " pod="openstack/octavia-image-upload-65dd99cb46-gzv86" Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.482451 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b516abb5-d52a-4fc7-9169-178bb103a5b4-httpd-config\") pod \"octavia-image-upload-65dd99cb46-gzv86\" (UID: \"b516abb5-d52a-4fc7-9169-178bb103a5b4\") " pod="openstack/octavia-image-upload-65dd99cb46-gzv86" Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.584002 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b516abb5-d52a-4fc7-9169-178bb103a5b4-httpd-config\") pod \"octavia-image-upload-65dd99cb46-gzv86\" (UID: \"b516abb5-d52a-4fc7-9169-178bb103a5b4\") " pod="openstack/octavia-image-upload-65dd99cb46-gzv86" Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.584163 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/b516abb5-d52a-4fc7-9169-178bb103a5b4-amphora-image\") pod \"octavia-image-upload-65dd99cb46-gzv86\" (UID: \"b516abb5-d52a-4fc7-9169-178bb103a5b4\") " pod="openstack/octavia-image-upload-65dd99cb46-gzv86" Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.584699 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/b516abb5-d52a-4fc7-9169-178bb103a5b4-amphora-image\") pod \"octavia-image-upload-65dd99cb46-gzv86\" (UID: \"b516abb5-d52a-4fc7-9169-178bb103a5b4\") " pod="openstack/octavia-image-upload-65dd99cb46-gzv86" Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.588058 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-7b4hh"] Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.594220 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b516abb5-d52a-4fc7-9169-178bb103a5b4-httpd-config\") pod \"octavia-image-upload-65dd99cb46-gzv86\" (UID: \"b516abb5-d52a-4fc7-9169-178bb103a5b4\") " 
pod="openstack/octavia-image-upload-65dd99cb46-gzv86" Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.696863 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-7b4hh"] Jan 29 08:15:02 crc kubenswrapper[4861]: I0129 08:15:02.729044 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.201383 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-65dd99cb46-gzv86"] Jan 29 08:15:03 crc kubenswrapper[4861]: W0129 08:15:03.212636 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb516abb5_d52a_4fc7_9169_178bb103a5b4.slice/crio-b08c01114767090be0e054ce718b7b1feea8d4d6150f6aea812519cc7bf99e13 WatchSource:0}: Error finding container b08c01114767090be0e054ce718b7b1feea8d4d6150f6aea812519cc7bf99e13: Status 404 returned error can't find the container with id b08c01114767090be0e054ce718b7b1feea8d4d6150f6aea812519cc7bf99e13 Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.421155 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7b4hh" event={"ID":"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8","Type":"ContainerStarted","Data":"055bd8331b0dc71779ccc9a5fa50cf563185fd1e04cec75fce5ef23648e0e292"} Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.424395 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" event={"ID":"b516abb5-d52a-4fc7-9169-178bb103a5b4","Type":"ContainerStarted","Data":"b08c01114767090be0e054ce718b7b1feea8d4d6150f6aea812519cc7bf99e13"} Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.650363 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-sync-f2bgs"] Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.653549 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.656329 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-scripts" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.658161 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-f2bgs"] Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.809425 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data-merged\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.809739 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-scripts\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.809846 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-combined-ca-bundle\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.809886 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.911424 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-combined-ca-bundle\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.911486 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.911723 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data-merged\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.912207 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data-merged\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.912541 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-scripts\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.917253 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-scripts\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.917415 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-combined-ca-bundle\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.920926 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data\") pod \"octavia-db-sync-f2bgs\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:03 crc kubenswrapper[4861]: I0129 08:15:03.989865 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.231129 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.321952 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8t628\" (UniqueName: \"kubernetes.io/projected/58031ad0-9a82-4948-a2dd-cf318360285f-kube-api-access-8t628\") pod \"58031ad0-9a82-4948-a2dd-cf318360285f\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.321995 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58031ad0-9a82-4948-a2dd-cf318360285f-secret-volume\") pod \"58031ad0-9a82-4948-a2dd-cf318360285f\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.322246 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58031ad0-9a82-4948-a2dd-cf318360285f-config-volume\") pod \"58031ad0-9a82-4948-a2dd-cf318360285f\" (UID: \"58031ad0-9a82-4948-a2dd-cf318360285f\") " Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.323944 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58031ad0-9a82-4948-a2dd-cf318360285f-config-volume" (OuterVolumeSpecName: "config-volume") pod "58031ad0-9a82-4948-a2dd-cf318360285f" (UID: "58031ad0-9a82-4948-a2dd-cf318360285f"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.329215 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58031ad0-9a82-4948-a2dd-cf318360285f-kube-api-access-8t628" (OuterVolumeSpecName: "kube-api-access-8t628") pod "58031ad0-9a82-4948-a2dd-cf318360285f" (UID: "58031ad0-9a82-4948-a2dd-cf318360285f"). InnerVolumeSpecName "kube-api-access-8t628". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.329791 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58031ad0-9a82-4948-a2dd-cf318360285f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "58031ad0-9a82-4948-a2dd-cf318360285f" (UID: "58031ad0-9a82-4948-a2dd-cf318360285f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.424697 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58031ad0-9a82-4948-a2dd-cf318360285f-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.424986 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58031ad0-9a82-4948-a2dd-cf318360285f-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.424998 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8t628\" (UniqueName: \"kubernetes.io/projected/58031ad0-9a82-4948-a2dd-cf318360285f-kube-api-access-8t628\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.455923 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" event={"ID":"58031ad0-9a82-4948-a2dd-cf318360285f","Type":"ContainerDied","Data":"035e5d991de2f837adf327986ccff2ea40e2da10bb13f76bf4b99b045153132e"} Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.455968 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="035e5d991de2f837adf327986ccff2ea40e2da10bb13f76bf4b99b045153132e" Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.456001 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w" Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.466606 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m"] Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.479277 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494530-jdd2m"] Jan 29 08:15:04 crc kubenswrapper[4861]: I0129 08:15:04.755095 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-f2bgs"] Jan 29 08:15:05 crc kubenswrapper[4861]: I0129 08:15:05.129590 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ba09b3c-c560-4a97-904f-3691faea50ef" path="/var/lib/kubelet/pods/3ba09b3c-c560-4a97-904f-3691faea50ef/volumes" Jan 29 08:15:05 crc kubenswrapper[4861]: I0129 08:15:05.466113 4861 generic.go:334] "Generic (PLEG): container finished" podID="faf89663-f5be-49c1-8c26-6317e2ebd435" containerID="31e37f3853942b4de0e6066f1d115606cbeee90fb6cbb564ae6a2ad5dee7e99d" exitCode=0 Jan 29 08:15:05 crc kubenswrapper[4861]: I0129 08:15:05.466172 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-f2bgs" event={"ID":"faf89663-f5be-49c1-8c26-6317e2ebd435","Type":"ContainerDied","Data":"31e37f3853942b4de0e6066f1d115606cbeee90fb6cbb564ae6a2ad5dee7e99d"} Jan 29 08:15:05 crc kubenswrapper[4861]: I0129 08:15:05.466198 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-f2bgs" event={"ID":"faf89663-f5be-49c1-8c26-6317e2ebd435","Type":"ContainerStarted","Data":"372159f8783fa4126a9dc069ec6a08b3a05d94e0210c5c6e0c67184343168ffc"} Jan 29 08:15:05 crc kubenswrapper[4861]: I0129 08:15:05.478356 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7b4hh" event={"ID":"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8","Type":"ContainerStarted","Data":"78f4ba1f310520afa8149b1f8901e1d7976ef1fbf3241edefc561213eaaa7f2b"} Jan 29 08:15:06 crc kubenswrapper[4861]: I0129 08:15:06.489551 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-f2bgs" event={"ID":"faf89663-f5be-49c1-8c26-6317e2ebd435","Type":"ContainerStarted","Data":"f27442c9044066c0d00bb83daac2585ec87ae652b0530dc27835bb142a05ed7d"} Jan 29 08:15:06 crc kubenswrapper[4861]: I0129 08:15:06.512011 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-db-sync-f2bgs" podStartSLOduration=3.511992222 podStartE2EDuration="3.511992222s" podCreationTimestamp="2026-01-29 08:15:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:15:06.504122375 +0000 UTC m=+5998.175616932" watchObservedRunningTime="2026-01-29 08:15:06.511992222 +0000 UTC m=+5998.183486779" Jan 29 08:15:06 crc kubenswrapper[4861]: I0129 08:15:06.544152 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:15:06 crc kubenswrapper[4861]: I0129 08:15:06.732925 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:15:07 crc kubenswrapper[4861]: I0129 08:15:07.507603 4861 generic.go:334] "Generic (PLEG): container finished" podID="3f06299f-473a-4b1e-8c5c-8b6eae0a27d8" containerID="78f4ba1f310520afa8149b1f8901e1d7976ef1fbf3241edefc561213eaaa7f2b" 
exitCode=0 Jan 29 08:15:07 crc kubenswrapper[4861]: I0129 08:15:07.507717 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7b4hh" event={"ID":"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8","Type":"ContainerDied","Data":"78f4ba1f310520afa8149b1f8901e1d7976ef1fbf3241edefc561213eaaa7f2b"} Jan 29 08:15:09 crc kubenswrapper[4861]: I0129 08:15:09.525237 4861 generic.go:334] "Generic (PLEG): container finished" podID="faf89663-f5be-49c1-8c26-6317e2ebd435" containerID="f27442c9044066c0d00bb83daac2585ec87ae652b0530dc27835bb142a05ed7d" exitCode=0 Jan 29 08:15:09 crc kubenswrapper[4861]: I0129 08:15:09.525322 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-f2bgs" event={"ID":"faf89663-f5be-49c1-8c26-6317e2ebd435","Type":"ContainerDied","Data":"f27442c9044066c0d00bb83daac2585ec87ae652b0530dc27835bb142a05ed7d"} Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.685996 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.809251 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data-merged\") pod \"faf89663-f5be-49c1-8c26-6317e2ebd435\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.810758 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data\") pod \"faf89663-f5be-49c1-8c26-6317e2ebd435\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.810856 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-combined-ca-bundle\") pod \"faf89663-f5be-49c1-8c26-6317e2ebd435\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.810913 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-scripts\") pod \"faf89663-f5be-49c1-8c26-6317e2ebd435\" (UID: \"faf89663-f5be-49c1-8c26-6317e2ebd435\") " Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.815867 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data" (OuterVolumeSpecName: "config-data") pod "faf89663-f5be-49c1-8c26-6317e2ebd435" (UID: "faf89663-f5be-49c1-8c26-6317e2ebd435"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.820206 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-scripts" (OuterVolumeSpecName: "scripts") pod "faf89663-f5be-49c1-8c26-6317e2ebd435" (UID: "faf89663-f5be-49c1-8c26-6317e2ebd435"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.837236 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "faf89663-f5be-49c1-8c26-6317e2ebd435" (UID: "faf89663-f5be-49c1-8c26-6317e2ebd435"). InnerVolumeSpecName "config-data-merged". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.846809 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "faf89663-f5be-49c1-8c26-6317e2ebd435" (UID: "faf89663-f5be-49c1-8c26-6317e2ebd435"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.913065 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data-merged\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.913107 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.913116 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:12 crc kubenswrapper[4861]: I0129 08:15:12.913124 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/faf89663-f5be-49c1-8c26-6317e2ebd435-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:13 crc kubenswrapper[4861]: I0129 08:15:13.566145 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-f2bgs" event={"ID":"faf89663-f5be-49c1-8c26-6317e2ebd435","Type":"ContainerDied","Data":"372159f8783fa4126a9dc069ec6a08b3a05d94e0210c5c6e0c67184343168ffc"} Jan 29 08:15:13 crc kubenswrapper[4861]: I0129 08:15:13.566496 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="372159f8783fa4126a9dc069ec6a08b3a05d94e0210c5c6e0c67184343168ffc" Jan 29 08:15:13 crc kubenswrapper[4861]: I0129 08:15:13.566586 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-f2bgs" Jan 29 08:15:13 crc kubenswrapper[4861]: I0129 08:15:13.570297 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" event={"ID":"b516abb5-d52a-4fc7-9169-178bb103a5b4","Type":"ContainerStarted","Data":"1bf280a77d0eab6625ecf6ae606269b3aa49e27ffbe6168ee50a6d76c0916cab"} Jan 29 08:15:13 crc kubenswrapper[4861]: I0129 08:15:13.577275 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7b4hh" event={"ID":"3f06299f-473a-4b1e-8c5c-8b6eae0a27d8","Type":"ContainerStarted","Data":"18e7dcba303cebc8286c3ca7ee4d5c2974cc3b9b52435df4082efd377b21559f"} Jan 29 08:15:13 crc kubenswrapper[4861]: I0129 08:15:13.577585 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:13 crc kubenswrapper[4861]: I0129 08:15:13.657687 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-rsyslog-7b4hh" podStartSLOduration=2.318471388 podStartE2EDuration="12.657653325s" podCreationTimestamp="2026-01-29 08:15:01 +0000 UTC" firstStartedPulling="2026-01-29 08:15:02.59419053 +0000 UTC m=+5994.265685087" lastFinishedPulling="2026-01-29 08:15:12.933372467 +0000 UTC m=+6004.604867024" observedRunningTime="2026-01-29 08:15:13.646548183 +0000 UTC m=+6005.318042760" watchObservedRunningTime="2026-01-29 08:15:13.657653325 +0000 UTC m=+6005.329147922" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.694440 4861 generic.go:334] "Generic (PLEG): container finished" podID="b516abb5-d52a-4fc7-9169-178bb103a5b4" containerID="1bf280a77d0eab6625ecf6ae606269b3aa49e27ffbe6168ee50a6d76c0916cab" exitCode=0 Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.695256 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" event={"ID":"b516abb5-d52a-4fc7-9169-178bb103a5b4","Type":"ContainerDied","Data":"1bf280a77d0eab6625ecf6ae606269b3aa49e27ffbe6168ee50a6d76c0916cab"} Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.820699 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-586997dfdc-nzlgn"] Jan 29 08:15:14 crc kubenswrapper[4861]: E0129 08:15:14.821432 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="faf89663-f5be-49c1-8c26-6317e2ebd435" containerName="octavia-db-sync" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.821544 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="faf89663-f5be-49c1-8c26-6317e2ebd435" containerName="octavia-db-sync" Jan 29 08:15:14 crc kubenswrapper[4861]: E0129 08:15:14.821656 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="faf89663-f5be-49c1-8c26-6317e2ebd435" containerName="init" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.821749 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="faf89663-f5be-49c1-8c26-6317e2ebd435" containerName="init" Jan 29 08:15:14 crc kubenswrapper[4861]: E0129 08:15:14.821851 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58031ad0-9a82-4948-a2dd-cf318360285f" containerName="collect-profiles" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.821937 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="58031ad0-9a82-4948-a2dd-cf318360285f" containerName="collect-profiles" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.826225 4861 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="58031ad0-9a82-4948-a2dd-cf318360285f" containerName="collect-profiles" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.826510 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="faf89663-f5be-49c1-8c26-6317e2ebd435" containerName="octavia-db-sync" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.828059 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.830783 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-internal-svc" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.831061 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-public-svc" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.871404 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-586997dfdc-nzlgn"] Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.975280 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-scripts\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.975598 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-octavia-run\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.975631 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-internal-tls-certs\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.975665 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-combined-ca-bundle\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.975716 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-public-tls-certs\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.975783 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-config-data\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.975869 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-ovndb-tls-certs\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:14 crc kubenswrapper[4861]: I0129 08:15:14.975907 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-config-data-merged\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.078123 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-ovndb-tls-certs\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.078202 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-config-data-merged\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.078298 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-scripts\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.078342 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-octavia-run\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.078373 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-internal-tls-certs\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.078406 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-combined-ca-bundle\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.078453 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-public-tls-certs\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.078517 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-config-data\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.078918 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-config-data-merged\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.079205 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-octavia-run\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.082704 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-config-data\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.082873 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-public-tls-certs\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.083313 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-internal-tls-certs\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.083382 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-ovndb-tls-certs\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.084052 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-scripts\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.084537 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5dd14c4-275b-46ce-9c6d-cbba0800e2a0-combined-ca-bundle\") pod \"octavia-api-586997dfdc-nzlgn\" (UID: \"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0\") " pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.169550 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.663058 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-586997dfdc-nzlgn"] Jan 29 08:15:15 crc kubenswrapper[4861]: W0129 08:15:15.665475 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode5dd14c4_275b_46ce_9c6d_cbba0800e2a0.slice/crio-2dd768444c7fc2227aff4ef8e3e1bd001c7d22a5074baa1668bd5e628b9061fb WatchSource:0}: Error finding container 2dd768444c7fc2227aff4ef8e3e1bd001c7d22a5074baa1668bd5e628b9061fb: Status 404 returned error can't find the container with id 2dd768444c7fc2227aff4ef8e3e1bd001c7d22a5074baa1668bd5e628b9061fb Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.709703 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" event={"ID":"b516abb5-d52a-4fc7-9169-178bb103a5b4","Type":"ContainerStarted","Data":"0af1bc2f068aa0f46df2d0bb7217ee60022ae52e5ca33586931104bbf67aae87"} Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.712246 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-586997dfdc-nzlgn" event={"ID":"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0","Type":"ContainerStarted","Data":"2dd768444c7fc2227aff4ef8e3e1bd001c7d22a5074baa1668bd5e628b9061fb"} Jan 29 08:15:15 crc kubenswrapper[4861]: I0129 08:15:15.739359 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" podStartSLOduration=3.872746392 podStartE2EDuration="13.739328291s" podCreationTimestamp="2026-01-29 08:15:02 +0000 UTC" firstStartedPulling="2026-01-29 08:15:03.215243392 +0000 UTC m=+5994.886737949" lastFinishedPulling="2026-01-29 08:15:13.081825291 +0000 UTC m=+6004.753319848" observedRunningTime="2026-01-29 08:15:15.733839007 +0000 UTC m=+6007.405333604" watchObservedRunningTime="2026-01-29 08:15:15.739328291 +0000 UTC m=+6007.410822868" Jan 29 08:15:16 crc kubenswrapper[4861]: I0129 08:15:16.724986 4861 generic.go:334] "Generic (PLEG): container finished" podID="e5dd14c4-275b-46ce-9c6d-cbba0800e2a0" containerID="3f6723751d93585b090f44824755c21254fd487a3ba282ec9827e2341779c85e" exitCode=0 Jan 29 08:15:16 crc kubenswrapper[4861]: I0129 08:15:16.725092 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-586997dfdc-nzlgn" event={"ID":"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0","Type":"ContainerDied","Data":"3f6723751d93585b090f44824755c21254fd487a3ba282ec9827e2341779c85e"} Jan 29 08:15:17 crc kubenswrapper[4861]: I0129 08:15:17.743866 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-586997dfdc-nzlgn" event={"ID":"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0","Type":"ContainerStarted","Data":"ee9759de268d60fe17ad5366966b5c6cd74892684c38c3ffa383c4768eab99ea"} Jan 29 08:15:17 crc kubenswrapper[4861]: I0129 08:15:17.744815 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:17 crc kubenswrapper[4861]: I0129 08:15:17.744835 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-586997dfdc-nzlgn" event={"ID":"e5dd14c4-275b-46ce-9c6d-cbba0800e2a0","Type":"ContainerStarted","Data":"15c5351a677fd5940037563be83d92eab19d7ccedf2d21416497d12a31b3f976"} Jan 29 08:15:17 crc kubenswrapper[4861]: I0129 08:15:17.783640 4861 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/octavia-api-586997dfdc-nzlgn" podStartSLOduration=3.783618383 podStartE2EDuration="3.783618383s" podCreationTimestamp="2026-01-29 08:15:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:15:17.778978141 +0000 UTC m=+6009.450472698" watchObservedRunningTime="2026-01-29 08:15:17.783618383 +0000 UTC m=+6009.455112940" Jan 29 08:15:18 crc kubenswrapper[4861]: I0129 08:15:18.767925 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.622688 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nkjb5"] Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.625664 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.637553 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nkjb5"] Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.661610 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-utilities\") pod \"community-operators-nkjb5\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.661805 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlg6h\" (UniqueName: \"kubernetes.io/projected/c77148d1-f23a-4707-a6c6-e648422e83eb-kube-api-access-hlg6h\") pod \"community-operators-nkjb5\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.661837 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-catalog-content\") pod \"community-operators-nkjb5\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.763866 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-utilities\") pod \"community-operators-nkjb5\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.764053 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlg6h\" (UniqueName: \"kubernetes.io/projected/c77148d1-f23a-4707-a6c6-e648422e83eb-kube-api-access-hlg6h\") pod \"community-operators-nkjb5\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.764096 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-catalog-content\") pod \"community-operators-nkjb5\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " 
pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.764663 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-catalog-content\") pod \"community-operators-nkjb5\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.764663 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-utilities\") pod \"community-operators-nkjb5\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.783235 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlg6h\" (UniqueName: \"kubernetes.io/projected/c77148d1-f23a-4707-a6c6-e648422e83eb-kube-api-access-hlg6h\") pod \"community-operators-nkjb5\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:26 crc kubenswrapper[4861]: I0129 08:15:26.961297 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:27 crc kubenswrapper[4861]: I0129 08:15:27.036694 4861 scope.go:117] "RemoveContainer" containerID="b3fd5f59f1baea8946161da2adadd03b06a4fef3c6184f206e96569a0f8420dc" Jan 29 08:15:27 crc kubenswrapper[4861]: I0129 08:15:27.855716 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nkjb5"] Jan 29 08:15:28 crc kubenswrapper[4861]: I0129 08:15:28.894232 4861 generic.go:334] "Generic (PLEG): container finished" podID="c77148d1-f23a-4707-a6c6-e648422e83eb" containerID="b084217e6033da74129bd197fe0c6746f646bd4035f38afaa7324bc977132ebe" exitCode=0 Jan 29 08:15:28 crc kubenswrapper[4861]: I0129 08:15:28.894268 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nkjb5" event={"ID":"c77148d1-f23a-4707-a6c6-e648422e83eb","Type":"ContainerDied","Data":"b084217e6033da74129bd197fe0c6746f646bd4035f38afaa7324bc977132ebe"} Jan 29 08:15:28 crc kubenswrapper[4861]: I0129 08:15:28.894504 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nkjb5" event={"ID":"c77148d1-f23a-4707-a6c6-e648422e83eb","Type":"ContainerStarted","Data":"c3211d30868f10aaa7ea001481ae91e0dc2dd7cf01103f6f9e54d2a68101076d"} Jan 29 08:15:30 crc kubenswrapper[4861]: I0129 08:15:30.940664 4861 generic.go:334] "Generic (PLEG): container finished" podID="c77148d1-f23a-4707-a6c6-e648422e83eb" containerID="6a44852f61fcf83f7fdd3406ae4a760cd851baf0fe4ce3436367ed96828fa821" exitCode=0 Jan 29 08:15:30 crc kubenswrapper[4861]: I0129 08:15:30.940728 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nkjb5" event={"ID":"c77148d1-f23a-4707-a6c6-e648422e83eb","Type":"ContainerDied","Data":"6a44852f61fcf83f7fdd3406ae4a760cd851baf0fe4ce3436367ed96828fa821"} Jan 29 08:15:31 crc kubenswrapper[4861]: I0129 08:15:31.952434 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nkjb5" 
event={"ID":"c77148d1-f23a-4707-a6c6-e648422e83eb","Type":"ContainerStarted","Data":"cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648"} Jan 29 08:15:31 crc kubenswrapper[4861]: I0129 08:15:31.972310 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nkjb5" podStartSLOduration=3.416038182 podStartE2EDuration="5.972294129s" podCreationTimestamp="2026-01-29 08:15:26 +0000 UTC" firstStartedPulling="2026-01-29 08:15:28.896371296 +0000 UTC m=+6020.567865853" lastFinishedPulling="2026-01-29 08:15:31.452627243 +0000 UTC m=+6023.124121800" observedRunningTime="2026-01-29 08:15:31.969158787 +0000 UTC m=+6023.640653344" watchObservedRunningTime="2026-01-29 08:15:31.972294129 +0000 UTC m=+6023.643788686" Jan 29 08:15:31 crc kubenswrapper[4861]: I0129 08:15:31.991448 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-rsyslog-7b4hh" Jan 29 08:15:34 crc kubenswrapper[4861]: I0129 08:15:34.573486 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:34 crc kubenswrapper[4861]: I0129 08:15:34.600987 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-586997dfdc-nzlgn" Jan 29 08:15:34 crc kubenswrapper[4861]: I0129 08:15:34.774068 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-api-cd49fd867-jr7th"] Jan 29 08:15:34 crc kubenswrapper[4861]: I0129 08:15:34.774325 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-api-cd49fd867-jr7th" podUID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerName="octavia-api" containerID="cri-o://dd5edcf1b954de97d3719158099bf8a02e64eae4e4f3f8935eb40426a71882b5" gracePeriod=30 Jan 29 08:15:34 crc kubenswrapper[4861]: I0129 08:15:34.774448 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-api-cd49fd867-jr7th" podUID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerName="octavia-api-provider-agent" containerID="cri-o://e8092efd3846afded23206571f13cd3fa241baf304315972a0f3b9e1412df5fb" gracePeriod=30 Jan 29 08:15:35 crc kubenswrapper[4861]: I0129 08:15:35.995505 4861 generic.go:334] "Generic (PLEG): container finished" podID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerID="e8092efd3846afded23206571f13cd3fa241baf304315972a0f3b9e1412df5fb" exitCode=0 Jan 29 08:15:35 crc kubenswrapper[4861]: I0129 08:15:35.995847 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-cd49fd867-jr7th" event={"ID":"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89","Type":"ContainerDied","Data":"e8092efd3846afded23206571f13cd3fa241baf304315972a0f3b9e1412df5fb"} Jan 29 08:15:36 crc kubenswrapper[4861]: I0129 08:15:36.961461 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:36 crc kubenswrapper[4861]: I0129 08:15:36.961916 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:37 crc kubenswrapper[4861]: I0129 08:15:37.042134 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:37 crc kubenswrapper[4861]: I0129 08:15:37.112010 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nkjb5" Jan 29 
08:15:37 crc kubenswrapper[4861]: I0129 08:15:37.284778 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nkjb5"] Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.015910 4861 generic.go:334] "Generic (PLEG): container finished" podID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerID="dd5edcf1b954de97d3719158099bf8a02e64eae4e4f3f8935eb40426a71882b5" exitCode=0 Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.016015 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-cd49fd867-jr7th" event={"ID":"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89","Type":"ContainerDied","Data":"dd5edcf1b954de97d3719158099bf8a02e64eae4e4f3f8935eb40426a71882b5"} Jan 29 08:15:38 crc kubenswrapper[4861]: E0129 08:15:38.054886 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98bdfc02_3b3b_4f35_b472_f71f5bbfdc89.slice/crio-conmon-dd5edcf1b954de97d3719158099bf8a02e64eae4e4f3f8935eb40426a71882b5.scope\": RecentStats: unable to find data in memory cache]" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.399440 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.510865 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-combined-ca-bundle\") pod \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.511106 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data-merged\") pod \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.511153 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data\") pod \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.511181 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-octavia-run\") pod \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.511285 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-scripts\") pod \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.511315 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-ovndb-tls-certs\") pod \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\" (UID: \"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89\") " Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.511674 4861 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-octavia-run" (OuterVolumeSpecName: "octavia-run") pod "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" (UID: "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89"). InnerVolumeSpecName "octavia-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.511976 4861 reconciler_common.go:293] "Volume detached for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-octavia-run\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.516972 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-scripts" (OuterVolumeSpecName: "scripts") pod "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" (UID: "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.520151 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data" (OuterVolumeSpecName: "config-data") pod "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" (UID: "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.603727 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" (UID: "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89"). InnerVolumeSpecName "config-data-merged". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.615033 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data-merged\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.615233 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.615255 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.622431 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" (UID: "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.717423 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.723281 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" (UID: "98bdfc02-3b3b-4f35-b472-f71f5bbfdc89"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:15:38 crc kubenswrapper[4861]: I0129 08:15:38.818780 4861 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.028226 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-cd49fd867-jr7th" event={"ID":"98bdfc02-3b3b-4f35-b472-f71f5bbfdc89","Type":"ContainerDied","Data":"f89ab9e9b6b15a6a73269a95d9fb905567fd20927b169ac14634cf95ddd6e3de"} Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.028260 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-cd49fd867-jr7th" Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.028335 4861 scope.go:117] "RemoveContainer" containerID="e8092efd3846afded23206571f13cd3fa241baf304315972a0f3b9e1412df5fb" Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.028360 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nkjb5" podUID="c77148d1-f23a-4707-a6c6-e648422e83eb" containerName="registry-server" containerID="cri-o://cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648" gracePeriod=2 Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.232128 4861 scope.go:117] "RemoveContainer" containerID="dd5edcf1b954de97d3719158099bf8a02e64eae4e4f3f8935eb40426a71882b5" Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.273560 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-api-cd49fd867-jr7th"] Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.291104 4861 scope.go:117] "RemoveContainer" containerID="affd3286942ce39d651b34c9aa87a34e6c12197b4e87acc27070237b5f209c5d" Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.291985 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-api-cd49fd867-jr7th"] Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.560597 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.666214 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-65dd99cb46-gzv86"] Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.666423 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" podUID="b516abb5-d52a-4fc7-9169-178bb103a5b4" containerName="octavia-amphora-httpd" containerID="cri-o://0af1bc2f068aa0f46df2d0bb7217ee60022ae52e5ca33586931104bbf67aae87" gracePeriod=30 Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.745318 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-utilities\") pod \"c77148d1-f23a-4707-a6c6-e648422e83eb\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.745362 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlg6h\" (UniqueName: \"kubernetes.io/projected/c77148d1-f23a-4707-a6c6-e648422e83eb-kube-api-access-hlg6h\") pod \"c77148d1-f23a-4707-a6c6-e648422e83eb\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.745413 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-catalog-content\") pod \"c77148d1-f23a-4707-a6c6-e648422e83eb\" (UID: \"c77148d1-f23a-4707-a6c6-e648422e83eb\") " Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.746232 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-utilities" (OuterVolumeSpecName: "utilities") pod "c77148d1-f23a-4707-a6c6-e648422e83eb" (UID: "c77148d1-f23a-4707-a6c6-e648422e83eb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.749927 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c77148d1-f23a-4707-a6c6-e648422e83eb-kube-api-access-hlg6h" (OuterVolumeSpecName: "kube-api-access-hlg6h") pod "c77148d1-f23a-4707-a6c6-e648422e83eb" (UID: "c77148d1-f23a-4707-a6c6-e648422e83eb"). InnerVolumeSpecName "kube-api-access-hlg6h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.798037 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c77148d1-f23a-4707-a6c6-e648422e83eb" (UID: "c77148d1-f23a-4707-a6c6-e648422e83eb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.846932 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.846966 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlg6h\" (UniqueName: \"kubernetes.io/projected/c77148d1-f23a-4707-a6c6-e648422e83eb-kube-api-access-hlg6h\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:39 crc kubenswrapper[4861]: I0129 08:15:39.846977 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c77148d1-f23a-4707-a6c6-e648422e83eb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.043654 4861 generic.go:334] "Generic (PLEG): container finished" podID="c77148d1-f23a-4707-a6c6-e648422e83eb" containerID="cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648" exitCode=0 Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.043718 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nkjb5" event={"ID":"c77148d1-f23a-4707-a6c6-e648422e83eb","Type":"ContainerDied","Data":"cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648"} Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.043746 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nkjb5" event={"ID":"c77148d1-f23a-4707-a6c6-e648422e83eb","Type":"ContainerDied","Data":"c3211d30868f10aaa7ea001481ae91e0dc2dd7cf01103f6f9e54d2a68101076d"} Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.043767 4861 scope.go:117] "RemoveContainer" containerID="cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.043888 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nkjb5" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.055310 4861 generic.go:334] "Generic (PLEG): container finished" podID="b516abb5-d52a-4fc7-9169-178bb103a5b4" containerID="0af1bc2f068aa0f46df2d0bb7217ee60022ae52e5ca33586931104bbf67aae87" exitCode=0 Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.055361 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" event={"ID":"b516abb5-d52a-4fc7-9169-178bb103a5b4","Type":"ContainerDied","Data":"0af1bc2f068aa0f46df2d0bb7217ee60022ae52e5ca33586931104bbf67aae87"} Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.104673 4861 scope.go:117] "RemoveContainer" containerID="6a44852f61fcf83f7fdd3406ae4a760cd851baf0fe4ce3436367ed96828fa821" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.121189 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nkjb5"] Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.130172 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nkjb5"] Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.130441 4861 scope.go:117] "RemoveContainer" containerID="b084217e6033da74129bd197fe0c6746f646bd4035f38afaa7324bc977132ebe" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.209162 4861 scope.go:117] "RemoveContainer" containerID="cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648" Jan 29 08:15:40 crc kubenswrapper[4861]: E0129 08:15:40.209680 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648\": container with ID starting with cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648 not found: ID does not exist" containerID="cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.209726 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648"} err="failed to get container status \"cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648\": rpc error: code = NotFound desc = could not find container \"cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648\": container with ID starting with cd0ae576425bd873cfaea000ff64fe902a2cab7f96b99fd53e2181850f34c648 not found: ID does not exist" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.209751 4861 scope.go:117] "RemoveContainer" containerID="6a44852f61fcf83f7fdd3406ae4a760cd851baf0fe4ce3436367ed96828fa821" Jan 29 08:15:40 crc kubenswrapper[4861]: E0129 08:15:40.210104 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a44852f61fcf83f7fdd3406ae4a760cd851baf0fe4ce3436367ed96828fa821\": container with ID starting with 6a44852f61fcf83f7fdd3406ae4a760cd851baf0fe4ce3436367ed96828fa821 not found: ID does not exist" containerID="6a44852f61fcf83f7fdd3406ae4a760cd851baf0fe4ce3436367ed96828fa821" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.210138 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a44852f61fcf83f7fdd3406ae4a760cd851baf0fe4ce3436367ed96828fa821"} err="failed to get container status 
\"6a44852f61fcf83f7fdd3406ae4a760cd851baf0fe4ce3436367ed96828fa821\": rpc error: code = NotFound desc = could not find container \"6a44852f61fcf83f7fdd3406ae4a760cd851baf0fe4ce3436367ed96828fa821\": container with ID starting with 6a44852f61fcf83f7fdd3406ae4a760cd851baf0fe4ce3436367ed96828fa821 not found: ID does not exist" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.210157 4861 scope.go:117] "RemoveContainer" containerID="b084217e6033da74129bd197fe0c6746f646bd4035f38afaa7324bc977132ebe" Jan 29 08:15:40 crc kubenswrapper[4861]: E0129 08:15:40.210458 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b084217e6033da74129bd197fe0c6746f646bd4035f38afaa7324bc977132ebe\": container with ID starting with b084217e6033da74129bd197fe0c6746f646bd4035f38afaa7324bc977132ebe not found: ID does not exist" containerID="b084217e6033da74129bd197fe0c6746f646bd4035f38afaa7324bc977132ebe" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.210528 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b084217e6033da74129bd197fe0c6746f646bd4035f38afaa7324bc977132ebe"} err="failed to get container status \"b084217e6033da74129bd197fe0c6746f646bd4035f38afaa7324bc977132ebe\": rpc error: code = NotFound desc = could not find container \"b084217e6033da74129bd197fe0c6746f646bd4035f38afaa7324bc977132ebe\": container with ID starting with b084217e6033da74129bd197fe0c6746f646bd4035f38afaa7324bc977132ebe not found: ID does not exist" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.249814 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.259978 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/b516abb5-d52a-4fc7-9169-178bb103a5b4-amphora-image\") pod \"b516abb5-d52a-4fc7-9169-178bb103a5b4\" (UID: \"b516abb5-d52a-4fc7-9169-178bb103a5b4\") " Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.260411 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b516abb5-d52a-4fc7-9169-178bb103a5b4-httpd-config\") pod \"b516abb5-d52a-4fc7-9169-178bb103a5b4\" (UID: \"b516abb5-d52a-4fc7-9169-178bb103a5b4\") " Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.311244 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b516abb5-d52a-4fc7-9169-178bb103a5b4-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "b516abb5-d52a-4fc7-9169-178bb103a5b4" (UID: "b516abb5-d52a-4fc7-9169-178bb103a5b4"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.354399 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b516abb5-d52a-4fc7-9169-178bb103a5b4-amphora-image" (OuterVolumeSpecName: "amphora-image") pod "b516abb5-d52a-4fc7-9169-178bb103a5b4" (UID: "b516abb5-d52a-4fc7-9169-178bb103a5b4"). InnerVolumeSpecName "amphora-image". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.363254 4861 reconciler_common.go:293] "Volume detached for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/b516abb5-d52a-4fc7-9169-178bb103a5b4-amphora-image\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:40 crc kubenswrapper[4861]: I0129 08:15:40.363278 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b516abb5-d52a-4fc7-9169-178bb103a5b4-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:15:41 crc kubenswrapper[4861]: I0129 08:15:41.070401 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" event={"ID":"b516abb5-d52a-4fc7-9169-178bb103a5b4","Type":"ContainerDied","Data":"b08c01114767090be0e054ce718b7b1feea8d4d6150f6aea812519cc7bf99e13"} Jan 29 08:15:41 crc kubenswrapper[4861]: I0129 08:15:41.070464 4861 scope.go:117] "RemoveContainer" containerID="0af1bc2f068aa0f46df2d0bb7217ee60022ae52e5ca33586931104bbf67aae87" Jan 29 08:15:41 crc kubenswrapper[4861]: I0129 08:15:41.070478 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-65dd99cb46-gzv86" Jan 29 08:15:41 crc kubenswrapper[4861]: I0129 08:15:41.115507 4861 scope.go:117] "RemoveContainer" containerID="1bf280a77d0eab6625ecf6ae606269b3aa49e27ffbe6168ee50a6d76c0916cab" Jan 29 08:15:41 crc kubenswrapper[4861]: I0129 08:15:41.152825 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" path="/var/lib/kubelet/pods/98bdfc02-3b3b-4f35-b472-f71f5bbfdc89/volumes" Jan 29 08:15:41 crc kubenswrapper[4861]: I0129 08:15:41.154157 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c77148d1-f23a-4707-a6c6-e648422e83eb" path="/var/lib/kubelet/pods/c77148d1-f23a-4707-a6c6-e648422e83eb/volumes" Jan 29 08:15:41 crc kubenswrapper[4861]: I0129 08:15:41.155194 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-65dd99cb46-gzv86"] Jan 29 08:15:41 crc kubenswrapper[4861]: I0129 08:15:41.155243 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-image-upload-65dd99cb46-gzv86"] Jan 29 08:15:43 crc kubenswrapper[4861]: I0129 08:15:43.136176 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b516abb5-d52a-4fc7-9169-178bb103a5b4" path="/var/lib/kubelet/pods/b516abb5-d52a-4fc7-9169-178bb103a5b4/volumes" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.519368 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-healthmanager-nrhlj"] Jan 29 08:16:06 crc kubenswrapper[4861]: E0129 08:16:06.520460 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c77148d1-f23a-4707-a6c6-e648422e83eb" containerName="extract-content" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520479 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c77148d1-f23a-4707-a6c6-e648422e83eb" containerName="extract-content" Jan 29 08:16:06 crc kubenswrapper[4861]: E0129 08:16:06.520493 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerName="octavia-api-provider-agent" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520504 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerName="octavia-api-provider-agent" Jan 29 08:16:06 crc 
kubenswrapper[4861]: E0129 08:16:06.520528 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerName="init" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520536 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerName="init" Jan 29 08:16:06 crc kubenswrapper[4861]: E0129 08:16:06.520551 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b516abb5-d52a-4fc7-9169-178bb103a5b4" containerName="octavia-amphora-httpd" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520561 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b516abb5-d52a-4fc7-9169-178bb103a5b4" containerName="octavia-amphora-httpd" Jan 29 08:16:06 crc kubenswrapper[4861]: E0129 08:16:06.520578 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c77148d1-f23a-4707-a6c6-e648422e83eb" containerName="extract-utilities" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520586 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c77148d1-f23a-4707-a6c6-e648422e83eb" containerName="extract-utilities" Jan 29 08:16:06 crc kubenswrapper[4861]: E0129 08:16:06.520604 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerName="octavia-api" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520612 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerName="octavia-api" Jan 29 08:16:06 crc kubenswrapper[4861]: E0129 08:16:06.520631 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b516abb5-d52a-4fc7-9169-178bb103a5b4" containerName="init" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520640 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b516abb5-d52a-4fc7-9169-178bb103a5b4" containerName="init" Jan 29 08:16:06 crc kubenswrapper[4861]: E0129 08:16:06.520650 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c77148d1-f23a-4707-a6c6-e648422e83eb" containerName="registry-server" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520674 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c77148d1-f23a-4707-a6c6-e648422e83eb" containerName="registry-server" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520886 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerName="octavia-api" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520904 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c77148d1-f23a-4707-a6c6-e648422e83eb" containerName="registry-server" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520918 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="98bdfc02-3b3b-4f35-b472-f71f5bbfdc89" containerName="octavia-api-provider-agent" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.520942 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b516abb5-d52a-4fc7-9169-178bb103a5b4" containerName="octavia-amphora-httpd" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.522455 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.526672 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-certs-secret" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.528177 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-config-data" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.529648 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-scripts" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.539518 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-nrhlj"] Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.624214 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-scripts\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.624290 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/51077d9f-c090-4d63-87e3-e4289c5a672b-hm-ports\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.624319 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-combined-ca-bundle\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.624407 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/51077d9f-c090-4d63-87e3-e4289c5a672b-config-data-merged\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.624819 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-config-data\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.624842 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-amphora-certs\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.727327 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-config-data\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc 
kubenswrapper[4861]: I0129 08:16:06.727505 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-amphora-certs\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.727713 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-scripts\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.727816 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/51077d9f-c090-4d63-87e3-e4289c5a672b-hm-ports\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.727890 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-combined-ca-bundle\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.727933 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/51077d9f-c090-4d63-87e3-e4289c5a672b-config-data-merged\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.728919 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/51077d9f-c090-4d63-87e3-e4289c5a672b-hm-ports\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.728983 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/51077d9f-c090-4d63-87e3-e4289c5a672b-config-data-merged\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.733428 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-amphora-certs\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.733835 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-combined-ca-bundle\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.733904 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-config-data\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.736463 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51077d9f-c090-4d63-87e3-e4289c5a672b-scripts\") pod \"octavia-healthmanager-nrhlj\" (UID: \"51077d9f-c090-4d63-87e3-e4289c5a672b\") " pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:06 crc kubenswrapper[4861]: I0129 08:16:06.844244 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:07 crc kubenswrapper[4861]: I0129 08:16:07.579118 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-nrhlj"] Jan 29 08:16:07 crc kubenswrapper[4861]: W0129 08:16:07.582290 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51077d9f_c090_4d63_87e3_e4289c5a672b.slice/crio-d187817019e16267d72237199d9264fa371e6f999e3b6e77ca9517c7c924c06e WatchSource:0}: Error finding container d187817019e16267d72237199d9264fa371e6f999e3b6e77ca9517c7c924c06e: Status 404 returned error can't find the container with id d187817019e16267d72237199d9264fa371e6f999e3b6e77ca9517c7c924c06e Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.129624 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-housekeeping-z4884"] Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.131757 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.187717 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-scripts" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.187895 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-config-data" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.198502 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-z4884"] Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.287335 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/6a35cb82-3c8f-4c31-9efc-a045c24053b7-config-data-merged\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.287390 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-amphora-certs\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.287417 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-config-data\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 
08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.287808 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/6a35cb82-3c8f-4c31-9efc-a045c24053b7-hm-ports\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.287968 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-scripts\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.288032 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-combined-ca-bundle\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.369812 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-nrhlj" event={"ID":"51077d9f-c090-4d63-87e3-e4289c5a672b","Type":"ContainerStarted","Data":"7afa102303d124878851b8e4afb446023c9d36b616065c1429661dac1f1ac1d3"} Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.370094 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-nrhlj" event={"ID":"51077d9f-c090-4d63-87e3-e4289c5a672b","Type":"ContainerStarted","Data":"d187817019e16267d72237199d9264fa371e6f999e3b6e77ca9517c7c924c06e"} Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.393910 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-scripts\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.393991 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-combined-ca-bundle\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.394095 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/6a35cb82-3c8f-4c31-9efc-a045c24053b7-config-data-merged\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.394138 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-amphora-certs\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.394173 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-config-data\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.395048 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/6a35cb82-3c8f-4c31-9efc-a045c24053b7-config-data-merged\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.395770 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/6a35cb82-3c8f-4c31-9efc-a045c24053b7-hm-ports\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.396835 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/6a35cb82-3c8f-4c31-9efc-a045c24053b7-hm-ports\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.401207 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-combined-ca-bundle\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.401466 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-config-data\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.401678 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-scripts\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.401727 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/6a35cb82-3c8f-4c31-9efc-a045c24053b7-amphora-certs\") pod \"octavia-housekeeping-z4884\" (UID: \"6a35cb82-3c8f-4c31-9efc-a045c24053b7\") " pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:08 crc kubenswrapper[4861]: I0129 08:16:08.503321 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:09 crc kubenswrapper[4861]: I0129 08:16:09.072836 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-z4884"] Jan 29 08:16:09 crc kubenswrapper[4861]: W0129 08:16:09.078515 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a35cb82_3c8f_4c31_9efc_a045c24053b7.slice/crio-9a4305f7dee5a6f48f65eca0c8fa6d1b0ad948149dce97dc8aa825576c5f481c WatchSource:0}: Error finding container 9a4305f7dee5a6f48f65eca0c8fa6d1b0ad948149dce97dc8aa825576c5f481c: Status 404 returned error can't find the container with id 9a4305f7dee5a6f48f65eca0c8fa6d1b0ad948149dce97dc8aa825576c5f481c Jan 29 08:16:09 crc kubenswrapper[4861]: I0129 08:16:09.380839 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-z4884" event={"ID":"6a35cb82-3c8f-4c31-9efc-a045c24053b7","Type":"ContainerStarted","Data":"9a4305f7dee5a6f48f65eca0c8fa6d1b0ad948149dce97dc8aa825576c5f481c"} Jan 29 08:16:09 crc kubenswrapper[4861]: I0129 08:16:09.884794 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-worker-x5hh9"] Jan 29 08:16:09 crc kubenswrapper[4861]: I0129 08:16:09.886596 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:09 crc kubenswrapper[4861]: I0129 08:16:09.889331 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-config-data" Jan 29 08:16:09 crc kubenswrapper[4861]: I0129 08:16:09.890200 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-scripts" Jan 29 08:16:09 crc kubenswrapper[4861]: I0129 08:16:09.894194 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-x5hh9"] Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.045859 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-amphora-certs\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.047255 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-scripts\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.047339 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-combined-ca-bundle\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.047505 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/e2550201-47c9-4d04-8161-24265932e7e4-config-data-merged\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.047557 4861 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/e2550201-47c9-4d04-8161-24265932e7e4-hm-ports\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.047595 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-config-data\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.150862 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-scripts\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.151621 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-combined-ca-bundle\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.151757 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/e2550201-47c9-4d04-8161-24265932e7e4-config-data-merged\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.151789 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/e2550201-47c9-4d04-8161-24265932e7e4-hm-ports\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.151810 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-config-data\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.151865 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-amphora-certs\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.152297 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/e2550201-47c9-4d04-8161-24265932e7e4-config-data-merged\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.152817 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/e2550201-47c9-4d04-8161-24265932e7e4-hm-ports\") pod \"octavia-worker-x5hh9\" (UID: 
\"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.157685 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-config-data\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.157930 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-amphora-certs\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.158415 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-combined-ca-bundle\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.158954 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2550201-47c9-4d04-8161-24265932e7e4-scripts\") pod \"octavia-worker-x5hh9\" (UID: \"e2550201-47c9-4d04-8161-24265932e7e4\") " pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.201537 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.392537 4861 generic.go:334] "Generic (PLEG): container finished" podID="51077d9f-c090-4d63-87e3-e4289c5a672b" containerID="7afa102303d124878851b8e4afb446023c9d36b616065c1429661dac1f1ac1d3" exitCode=0 Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.392591 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-nrhlj" event={"ID":"51077d9f-c090-4d63-87e3-e4289c5a672b","Type":"ContainerDied","Data":"7afa102303d124878851b8e4afb446023c9d36b616065c1429661dac1f1ac1d3"} Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.718131 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-nrhlj"] Jan 29 08:16:10 crc kubenswrapper[4861]: I0129 08:16:10.931994 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-x5hh9"] Jan 29 08:16:11 crc kubenswrapper[4861]: I0129 08:16:11.403470 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-nrhlj" event={"ID":"51077d9f-c090-4d63-87e3-e4289c5a672b","Type":"ContainerStarted","Data":"556d412bd1e7bc228d4fa7d42504b340b2a235876ef9275e0ec6861dfa46fbf4"} Jan 29 08:16:11 crc kubenswrapper[4861]: I0129 08:16:11.403718 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:11 crc kubenswrapper[4861]: I0129 08:16:11.405591 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-z4884" event={"ID":"6a35cb82-3c8f-4c31-9efc-a045c24053b7","Type":"ContainerStarted","Data":"3a41a0c0b608ae6c728c53898a22866943380fb6397ea99bdaf9302a6be8ed7d"} Jan 29 08:16:11 crc kubenswrapper[4861]: I0129 08:16:11.407549 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-x5hh9" 
event={"ID":"e2550201-47c9-4d04-8161-24265932e7e4","Type":"ContainerStarted","Data":"d3a8c7fdb2371a5e0d77a9e38cddfc980fbbe4aa5bf8b4e495917074aa716dfa"} Jan 29 08:16:11 crc kubenswrapper[4861]: I0129 08:16:11.448933 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-healthmanager-nrhlj" podStartSLOduration=5.448917009 podStartE2EDuration="5.448917009s" podCreationTimestamp="2026-01-29 08:16:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:16:11.424153878 +0000 UTC m=+6063.095648435" watchObservedRunningTime="2026-01-29 08:16:11.448917009 +0000 UTC m=+6063.120411566" Jan 29 08:16:12 crc kubenswrapper[4861]: I0129 08:16:12.416258 4861 generic.go:334] "Generic (PLEG): container finished" podID="6a35cb82-3c8f-4c31-9efc-a045c24053b7" containerID="3a41a0c0b608ae6c728c53898a22866943380fb6397ea99bdaf9302a6be8ed7d" exitCode=0 Jan 29 08:16:12 crc kubenswrapper[4861]: I0129 08:16:12.416344 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-z4884" event={"ID":"6a35cb82-3c8f-4c31-9efc-a045c24053b7","Type":"ContainerDied","Data":"3a41a0c0b608ae6c728c53898a22866943380fb6397ea99bdaf9302a6be8ed7d"} Jan 29 08:16:13 crc kubenswrapper[4861]: I0129 08:16:13.425738 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-z4884" event={"ID":"6a35cb82-3c8f-4c31-9efc-a045c24053b7","Type":"ContainerStarted","Data":"369f9fbe20d64100b4ce20597627e90dd84f1a829f3c277133e1d97b9a6410c0"} Jan 29 08:16:13 crc kubenswrapper[4861]: I0129 08:16:13.428022 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:13 crc kubenswrapper[4861]: I0129 08:16:13.453399 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-housekeeping-z4884" podStartSLOduration=4.118600691 podStartE2EDuration="5.453381354s" podCreationTimestamp="2026-01-29 08:16:08 +0000 UTC" firstStartedPulling="2026-01-29 08:16:09.080806981 +0000 UTC m=+6060.752301538" lastFinishedPulling="2026-01-29 08:16:10.415587644 +0000 UTC m=+6062.087082201" observedRunningTime="2026-01-29 08:16:13.447711985 +0000 UTC m=+6065.119206562" watchObservedRunningTime="2026-01-29 08:16:13.453381354 +0000 UTC m=+6065.124875911" Jan 29 08:16:14 crc kubenswrapper[4861]: I0129 08:16:14.440310 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-x5hh9" event={"ID":"e2550201-47c9-4d04-8161-24265932e7e4","Type":"ContainerStarted","Data":"e225889c31ea635db0c25f90d2a18ab3b8970e6947447c6bd3a0f47bd772886e"} Jan 29 08:16:15 crc kubenswrapper[4861]: I0129 08:16:15.471974 4861 generic.go:334] "Generic (PLEG): container finished" podID="e2550201-47c9-4d04-8161-24265932e7e4" containerID="e225889c31ea635db0c25f90d2a18ab3b8970e6947447c6bd3a0f47bd772886e" exitCode=0 Jan 29 08:16:15 crc kubenswrapper[4861]: I0129 08:16:15.472134 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-x5hh9" event={"ID":"e2550201-47c9-4d04-8161-24265932e7e4","Type":"ContainerDied","Data":"e225889c31ea635db0c25f90d2a18ab3b8970e6947447c6bd3a0f47bd772886e"} Jan 29 08:16:16 crc kubenswrapper[4861]: I0129 08:16:16.485130 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-x5hh9" 
event={"ID":"e2550201-47c9-4d04-8161-24265932e7e4","Type":"ContainerStarted","Data":"63bf5bd44dad43c16ac1f4af8106eb407b7f777fb630fbe22f55fc432d49d32f"} Jan 29 08:16:16 crc kubenswrapper[4861]: I0129 08:16:16.487309 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:16 crc kubenswrapper[4861]: I0129 08:16:16.831562 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-worker-x5hh9" podStartSLOduration=5.250346774 podStartE2EDuration="7.831539966s" podCreationTimestamp="2026-01-29 08:16:09 +0000 UTC" firstStartedPulling="2026-01-29 08:16:10.946162687 +0000 UTC m=+6062.617657244" lastFinishedPulling="2026-01-29 08:16:13.527355879 +0000 UTC m=+6065.198850436" observedRunningTime="2026-01-29 08:16:16.821515483 +0000 UTC m=+6068.493010040" watchObservedRunningTime="2026-01-29 08:16:16.831539966 +0000 UTC m=+6068.503034533" Jan 29 08:16:21 crc kubenswrapper[4861]: I0129 08:16:21.877756 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-healthmanager-nrhlj" Jan 29 08:16:23 crc kubenswrapper[4861]: I0129 08:16:23.541993 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-housekeeping-z4884" Jan 29 08:16:25 crc kubenswrapper[4861]: I0129 08:16:25.234820 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-worker-x5hh9" Jan 29 08:16:26 crc kubenswrapper[4861]: I0129 08:16:26.087681 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-a323-account-create-update-qmmlf"] Jan 29 08:16:26 crc kubenswrapper[4861]: I0129 08:16:26.101609 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-bbs2g"] Jan 29 08:16:26 crc kubenswrapper[4861]: I0129 08:16:26.115834 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-a323-account-create-update-qmmlf"] Jan 29 08:16:26 crc kubenswrapper[4861]: I0129 08:16:26.134744 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-bbs2g"] Jan 29 08:16:27 crc kubenswrapper[4861]: I0129 08:16:27.138609 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00b75b77-6e85-4af7-be07-ed3bd7c338db" path="/var/lib/kubelet/pods/00b75b77-6e85-4af7-be07-ed3bd7c338db/volumes" Jan 29 08:16:27 crc kubenswrapper[4861]: I0129 08:16:27.140426 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="134b5d13-5549-459c-8a27-2e5908651fb0" path="/var/lib/kubelet/pods/134b5d13-5549-459c-8a27-2e5908651fb0/volumes" Jan 29 08:16:27 crc kubenswrapper[4861]: I0129 08:16:27.396572 4861 scope.go:117] "RemoveContainer" containerID="40a15692810876a8d71c0c27b58259b2f6e27e1935f49a39d3c1ca79b8166970" Jan 29 08:16:27 crc kubenswrapper[4861]: I0129 08:16:27.458030 4861 scope.go:117] "RemoveContainer" containerID="825ae3007e911e3b112efced3bd22de746000938f42aa19c9ececbace473b492" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.047569 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-z58jp"] Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.058133 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-z58jp"] Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.133659 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b81a3989-af02-4421-8674-c39b2dd81601" 
path="/var/lib/kubelet/pods/b81a3989-af02-4421-8674-c39b2dd81601/volumes" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.822516 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-59b7bfd865-j6jn9"] Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.824240 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.826736 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-74mpj" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.836570 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.838012 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.838288 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.851746 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-59b7bfd865-j6jn9"] Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.925785 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.926305 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" containerName="glance-log" containerID="cri-o://1c665bc05a78b77d6a80786dbce31909183c32f4ca989b40fd63de4386b8a22b" gracePeriod=30 Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.930604 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" containerName="glance-httpd" containerID="cri-o://5ae1c90c85b006c4ff066b1990e099f99eeec0dde3dd97e124951475df5bd1f6" gracePeriod=30 Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.946697 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-scripts\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.946745 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxzf2\" (UniqueName: \"kubernetes.io/projected/39084618-2c91-4f07-9f78-baea7f30b91c-kube-api-access-hxzf2\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.946784 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39084618-2c91-4f07-9f78-baea7f30b91c-logs\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.946809 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-config-data\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.946845 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39084618-2c91-4f07-9f78-baea7f30b91c-horizon-secret-key\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.976171 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.976506 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" containerName="glance-log" containerID="cri-o://27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d" gracePeriod=30 Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.977144 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" containerName="glance-httpd" containerID="cri-o://fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f" gracePeriod=30 Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.995519 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-85d8575b45-h5ntt"] Jan 29 08:16:35 crc kubenswrapper[4861]: I0129 08:16:35.997249 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.009764 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85d8575b45-h5ntt"] Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.048750 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-scripts\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.048803 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxzf2\" (UniqueName: \"kubernetes.io/projected/39084618-2c91-4f07-9f78-baea7f30b91c-kube-api-access-hxzf2\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.048838 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39084618-2c91-4f07-9f78-baea7f30b91c-logs\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.048862 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-config-data\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.048894 4861 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39084618-2c91-4f07-9f78-baea7f30b91c-horizon-secret-key\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.051143 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39084618-2c91-4f07-9f78-baea7f30b91c-logs\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.051230 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-scripts\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.052339 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-config-data\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.066770 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39084618-2c91-4f07-9f78-baea7f30b91c-horizon-secret-key\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.070693 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxzf2\" (UniqueName: \"kubernetes.io/projected/39084618-2c91-4f07-9f78-baea7f30b91c-kube-api-access-hxzf2\") pod \"horizon-59b7bfd865-j6jn9\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.150924 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-scripts\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.151161 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-config-data\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.151271 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2652a34-9956-49b1-be3b-2be1309fd228-logs\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.151350 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcgd2\" (UniqueName: 
\"kubernetes.io/projected/f2652a34-9956-49b1-be3b-2be1309fd228-kube-api-access-zcgd2\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.151455 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f2652a34-9956-49b1-be3b-2be1309fd228-horizon-secret-key\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.181847 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.253357 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-scripts\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.253836 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-config-data\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.253896 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2652a34-9956-49b1-be3b-2be1309fd228-logs\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.253925 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcgd2\" (UniqueName: \"kubernetes.io/projected/f2652a34-9956-49b1-be3b-2be1309fd228-kube-api-access-zcgd2\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.253953 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f2652a34-9956-49b1-be3b-2be1309fd228-horizon-secret-key\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.254339 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-scripts\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.255231 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2652a34-9956-49b1-be3b-2be1309fd228-logs\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.256255 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-config-data\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.258317 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f2652a34-9956-49b1-be3b-2be1309fd228-horizon-secret-key\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.276326 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcgd2\" (UniqueName: \"kubernetes.io/projected/f2652a34-9956-49b1-be3b-2be1309fd228-kube-api-access-zcgd2\") pod \"horizon-85d8575b45-h5ntt\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.327534 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.688426 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-59b7bfd865-j6jn9"] Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.702603 4861 generic.go:334] "Generic (PLEG): container finished" podID="80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" containerID="27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d" exitCode=143 Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.702684 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1","Type":"ContainerDied","Data":"27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d"} Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.704280 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59b7bfd865-j6jn9" event={"ID":"39084618-2c91-4f07-9f78-baea7f30b91c","Type":"ContainerStarted","Data":"624070b42a0f93970ae0a241fba0e42d340dd938856f5404ca07086891cd1267"} Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.706781 4861 generic.go:334] "Generic (PLEG): container finished" podID="8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" containerID="1c665bc05a78b77d6a80786dbce31909183c32f4ca989b40fd63de4386b8a22b" exitCode=143 Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.706807 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d","Type":"ContainerDied","Data":"1c665bc05a78b77d6a80786dbce31909183c32f4ca989b40fd63de4386b8a22b"} Jan 29 08:16:36 crc kubenswrapper[4861]: W0129 08:16:36.841167 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2652a34_9956_49b1_be3b_2be1309fd228.slice/crio-bb96a2629d1b837b750f93def306960263319f6380c1b68272fc078cfca8f255 WatchSource:0}: Error finding container bb96a2629d1b837b750f93def306960263319f6380c1b68272fc078cfca8f255: Status 404 returned error can't find the container with id bb96a2629d1b837b750f93def306960263319f6380c1b68272fc078cfca8f255 Jan 29 08:16:36 crc kubenswrapper[4861]: I0129 08:16:36.841830 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85d8575b45-h5ntt"] Jan 29 08:16:37 crc kubenswrapper[4861]: I0129 08:16:37.735721 4861 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d8575b45-h5ntt" event={"ID":"f2652a34-9956-49b1-be3b-2be1309fd228","Type":"ContainerStarted","Data":"bb96a2629d1b837b750f93def306960263319f6380c1b68272fc078cfca8f255"} Jan 29 08:16:37 crc kubenswrapper[4861]: I0129 08:16:37.869251 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-59b7bfd865-j6jn9"] Jan 29 08:16:37 crc kubenswrapper[4861]: I0129 08:16:37.898840 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-596fc8cbb6-w86n9"] Jan 29 08:16:37 crc kubenswrapper[4861]: I0129 08:16:37.900863 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:37 crc kubenswrapper[4861]: I0129 08:16:37.906408 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Jan 29 08:16:37 crc kubenswrapper[4861]: I0129 08:16:37.938753 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-596fc8cbb6-w86n9"] Jan 29 08:16:37 crc kubenswrapper[4861]: I0129 08:16:37.978803 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85d8575b45-h5ntt"] Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.020642 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6f4b54cb6-vdr9c"] Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.024292 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-combined-ca-bundle\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.024349 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-tls-certs\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.024388 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zp4j4\" (UniqueName: \"kubernetes.io/projected/ed85a66d-a2c3-445d-a911-8c7932fd9728-kube-api-access-zp4j4\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.024427 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-secret-key\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.024461 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-scripts\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.024486 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed85a66d-a2c3-445d-a911-8c7932fd9728-logs\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.024523 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-config-data\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.025107 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.048007 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6f4b54cb6-vdr9c"] Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.126371 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-tls-certs\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.126504 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-config-data\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.126602 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-scripts\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.126683 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c569ed14-e911-4ead-ada8-270f32a1297f-logs\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.126708 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-secret-key\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.126818 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-combined-ca-bundle\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.126839 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-combined-ca-bundle\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.126874 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-config-data\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.126900 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-tls-certs\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.127003 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zp4j4\" (UniqueName: \"kubernetes.io/projected/ed85a66d-a2c3-445d-a911-8c7932fd9728-kube-api-access-zp4j4\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.127066 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgnjm\" (UniqueName: \"kubernetes.io/projected/c569ed14-e911-4ead-ada8-270f32a1297f-kube-api-access-hgnjm\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.127129 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-secret-key\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.127173 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-scripts\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.127222 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed85a66d-a2c3-445d-a911-8c7932fd9728-logs\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.127650 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed85a66d-a2c3-445d-a911-8c7932fd9728-logs\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.128843 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-config-data\") pod \"horizon-596fc8cbb6-w86n9\" (UID: 
\"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.130580 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-scripts\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.133754 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-secret-key\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.134278 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-combined-ca-bundle\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.136618 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-tls-certs\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.144685 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zp4j4\" (UniqueName: \"kubernetes.io/projected/ed85a66d-a2c3-445d-a911-8c7932fd9728-kube-api-access-zp4j4\") pod \"horizon-596fc8cbb6-w86n9\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") " pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.229461 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-combined-ca-bundle\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.229514 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-config-data\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.229584 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgnjm\" (UniqueName: \"kubernetes.io/projected/c569ed14-e911-4ead-ada8-270f32a1297f-kube-api-access-hgnjm\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.229632 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-tls-certs\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 
08:16:38.229675 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-scripts\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.229714 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c569ed14-e911-4ead-ada8-270f32a1297f-logs\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.229734 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-secret-key\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.231265 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c569ed14-e911-4ead-ada8-270f32a1297f-logs\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.231813 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-config-data\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.232548 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-scripts\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.233554 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.234710 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-secret-key\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.237839 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-combined-ca-bundle\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.239562 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-tls-certs\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.249690 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgnjm\" (UniqueName: \"kubernetes.io/projected/c569ed14-e911-4ead-ada8-270f32a1297f-kube-api-access-hgnjm\") pod \"horizon-6f4b54cb6-vdr9c\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.349558 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.789457 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-596fc8cbb6-w86n9"] Jan 29 08:16:38 crc kubenswrapper[4861]: W0129 08:16:38.799165 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded85a66d_a2c3_445d_a911_8c7932fd9728.slice/crio-69dce663aad793a4ad423cd13bf98345f2e5c6975395bb77147133f3eeb5b8c2 WatchSource:0}: Error finding container 69dce663aad793a4ad423cd13bf98345f2e5c6975395bb77147133f3eeb5b8c2: Status 404 returned error can't find the container with id 69dce663aad793a4ad423cd13bf98345f2e5c6975395bb77147133f3eeb5b8c2 Jan 29 08:16:38 crc kubenswrapper[4861]: I0129 08:16:38.924819 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6f4b54cb6-vdr9c"] Jan 29 08:16:38 crc kubenswrapper[4861]: W0129 08:16:38.934352 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc569ed14_e911_4ead_ada8_270f32a1297f.slice/crio-a4675aa26f63c7e2e85820ff1031d541be92d709e05413f2eb5d2f5b3795da6a WatchSource:0}: Error finding container a4675aa26f63c7e2e85820ff1031d541be92d709e05413f2eb5d2f5b3795da6a: Status 404 returned error can't find the container with id a4675aa26f63c7e2e85820ff1031d541be92d709e05413f2eb5d2f5b3795da6a Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.725303 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.776711 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-config-data\") pod \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.776821 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9mfg\" (UniqueName: \"kubernetes.io/projected/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-kube-api-access-k9mfg\") pod \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.776899 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-internal-tls-certs\") pod \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.776949 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-scripts\") pod \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.776973 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-logs\") pod \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.776994 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-httpd-run\") pod \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.777830 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-combined-ca-bundle\") pod \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\" (UID: \"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1\") " Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.778032 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-logs" (OuterVolumeSpecName: "logs") pod "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" (UID: "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.778806 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.779003 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" (UID: "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.785193 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f4b54cb6-vdr9c" event={"ID":"c569ed14-e911-4ead-ada8-270f32a1297f","Type":"ContainerStarted","Data":"a4675aa26f63c7e2e85820ff1031d541be92d709e05413f2eb5d2f5b3795da6a"} Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.789792 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-kube-api-access-k9mfg" (OuterVolumeSpecName: "kube-api-access-k9mfg") pod "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" (UID: "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1"). InnerVolumeSpecName "kube-api-access-k9mfg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.804290 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-scripts" (OuterVolumeSpecName: "scripts") pod "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" (UID: "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.810405 4861 generic.go:334] "Generic (PLEG): container finished" podID="80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" containerID="fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f" exitCode=0 Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.810499 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.810869 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1","Type":"ContainerDied","Data":"fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f"} Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.810927 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1","Type":"ContainerDied","Data":"4f7ba354214f91c713beebba3ed0e9f025fd698f6fc600c70bb0d9ad11c933d7"} Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.810948 4861 scope.go:117] "RemoveContainer" containerID="fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.815372 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-596fc8cbb6-w86n9" event={"ID":"ed85a66d-a2c3-445d-a911-8c7932fd9728","Type":"ContainerStarted","Data":"69dce663aad793a4ad423cd13bf98345f2e5c6975395bb77147133f3eeb5b8c2"} Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.821069 4861 generic.go:334] "Generic (PLEG): container finished" podID="8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" containerID="5ae1c90c85b006c4ff066b1990e099f99eeec0dde3dd97e124951475df5bd1f6" exitCode=0 Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.821122 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d","Type":"ContainerDied","Data":"5ae1c90c85b006c4ff066b1990e099f99eeec0dde3dd97e124951475df5bd1f6"} Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.840239 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" (UID: "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.860958 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-config-data" (OuterVolumeSpecName: "config-data") pod "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" (UID: "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.880820 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.880850 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.880860 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.880869 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.880880 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9mfg\" (UniqueName: \"kubernetes.io/projected/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-kube-api-access-k9mfg\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.883493 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" (UID: "80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:16:39 crc kubenswrapper[4861]: I0129 08:16:39.982260 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.165049 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.175117 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.194460 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:16:40 crc kubenswrapper[4861]: E0129 08:16:40.194905 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" containerName="glance-log" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.194918 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" containerName="glance-log" Jan 29 08:16:40 crc kubenswrapper[4861]: E0129 08:16:40.194937 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" containerName="glance-httpd" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.194943 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" containerName="glance-httpd" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.195167 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" containerName="glance-log" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.195184 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" containerName="glance-httpd" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.196180 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.199200 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.199464 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.205787 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.296294 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ff300e5-7a29-4ec7-974c-de163370f2f8-logs\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.296374 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.296464 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ff300e5-7a29-4ec7-974c-de163370f2f8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.296525 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.296558 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.296598 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f89ss\" (UniqueName: \"kubernetes.io/projected/1ff300e5-7a29-4ec7-974c-de163370f2f8-kube-api-access-f89ss\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.296621 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.399041 4861 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ff300e5-7a29-4ec7-974c-de163370f2f8-logs\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.399145 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.399231 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ff300e5-7a29-4ec7-974c-de163370f2f8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.399289 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.399324 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.399367 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f89ss\" (UniqueName: \"kubernetes.io/projected/1ff300e5-7a29-4ec7-974c-de163370f2f8-kube-api-access-f89ss\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.399386 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.400686 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ff300e5-7a29-4ec7-974c-de163370f2f8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.400898 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ff300e5-7a29-4ec7-974c-de163370f2f8-logs\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.405794 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.407044 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.410356 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.419491 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f89ss\" (UniqueName: \"kubernetes.io/projected/1ff300e5-7a29-4ec7-974c-de163370f2f8-kube-api-access-f89ss\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.421322 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ff300e5-7a29-4ec7-974c-de163370f2f8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1ff300e5-7a29-4ec7-974c-de163370f2f8\") " pod="openstack/glance-default-internal-api-0" Jan 29 08:16:40 crc kubenswrapper[4861]: I0129 08:16:40.535967 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:41 crc kubenswrapper[4861]: I0129 08:16:41.127751 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1" path="/var/lib/kubelet/pods/80dd3b6d-4d47-4d5f-aa67-f6e0e34289d1/volumes" Jan 29 08:16:44 crc kubenswrapper[4861]: I0129 08:16:44.897123 4861 scope.go:117] "RemoveContainer" containerID="27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.019736 4861 scope.go:117] "RemoveContainer" containerID="fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f" Jan 29 08:16:45 crc kubenswrapper[4861]: E0129 08:16:45.020305 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f\": container with ID starting with fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f not found: ID does not exist" containerID="fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.020346 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f"} err="failed to get container status \"fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f\": rpc error: code = NotFound desc = could not find container \"fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f\": container with ID starting with fa8e8b1c716c538f3a7bfd48a6664a4f074c9275e24a0a9aa32cf5cdabb8f54f not found: ID does not exist" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.020371 4861 scope.go:117] "RemoveContainer" containerID="27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d" Jan 29 08:16:45 crc kubenswrapper[4861]: E0129 08:16:45.021038 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d\": container with ID starting with 27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d not found: ID does not exist" containerID="27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.021096 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d"} err="failed to get container status \"27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d\": rpc error: code = NotFound desc = could not find container \"27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d\": container with ID starting with 27bf96c2bd82f1688f3aa593537370333f46a7375e8a7569996f766726a8f40d not found: ID does not exist" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.036804 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.202744 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5l5kp\" (UniqueName: \"kubernetes.io/projected/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-kube-api-access-5l5kp\") pod \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.203005 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-logs\") pod \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.203102 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-combined-ca-bundle\") pod \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.203164 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-scripts\") pod \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.203209 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-config-data\") pod \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.203248 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-httpd-run\") pod \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.203268 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-public-tls-certs\") pod \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\" (UID: \"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d\") " Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.253671 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" (UID: "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.255787 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-logs" (OuterVolumeSpecName: "logs") pod "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" (UID: "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.271405 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-kube-api-access-5l5kp" (OuterVolumeSpecName: "kube-api-access-5l5kp") pod "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" (UID: "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d"). InnerVolumeSpecName "kube-api-access-5l5kp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.279023 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" (UID: "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.289940 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-scripts" (OuterVolumeSpecName: "scripts") pod "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" (UID: "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.312014 4861 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.312048 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5l5kp\" (UniqueName: \"kubernetes.io/projected/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-kube-api-access-5l5kp\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.312060 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.312072 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.312094 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.330733 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-config-data" (OuterVolumeSpecName: "config-data") pod "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" (UID: "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.330780 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" (UID: "8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.414374 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.414409 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.750506 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 08:16:45 crc kubenswrapper[4861]: W0129 08:16:45.759488 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ff300e5_7a29_4ec7_974c_de163370f2f8.slice/crio-93a66cba32b2d1b66a701bf0aab46106eed47a49083e8433a4c9a13b6121a5cf WatchSource:0}: Error finding container 93a66cba32b2d1b66a701bf0aab46106eed47a49083e8433a4c9a13b6121a5cf: Status 404 returned error can't find the container with id 93a66cba32b2d1b66a701bf0aab46106eed47a49083e8433a4c9a13b6121a5cf Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.893479 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59b7bfd865-j6jn9" event={"ID":"39084618-2c91-4f07-9f78-baea7f30b91c","Type":"ContainerStarted","Data":"a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588"} Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.893519 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59b7bfd865-j6jn9" event={"ID":"39084618-2c91-4f07-9f78-baea7f30b91c","Type":"ContainerStarted","Data":"e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767"} Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.893622 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-59b7bfd865-j6jn9" podUID="39084618-2c91-4f07-9f78-baea7f30b91c" containerName="horizon-log" containerID="cri-o://e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767" gracePeriod=30 Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.893952 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-59b7bfd865-j6jn9" podUID="39084618-2c91-4f07-9f78-baea7f30b91c" containerName="horizon" containerID="cri-o://a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588" gracePeriod=30 Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.898128 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-596fc8cbb6-w86n9" event={"ID":"ed85a66d-a2c3-445d-a911-8c7932fd9728","Type":"ContainerStarted","Data":"f7b8d6ce47ddf73017a21e8efb18b334c894b3cd5ad286a54caa82252c0511fc"} Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.898155 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-596fc8cbb6-w86n9" event={"ID":"ed85a66d-a2c3-445d-a911-8c7932fd9728","Type":"ContainerStarted","Data":"2ba67d3155a237d00b4a4d82a5a33069c26f881a25c1ecbdaf40ae1307dc8476"} Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.900146 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d8575b45-h5ntt" event={"ID":"f2652a34-9956-49b1-be3b-2be1309fd228","Type":"ContainerStarted","Data":"a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0"} Jan 29 
08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.900174 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d8575b45-h5ntt" event={"ID":"f2652a34-9956-49b1-be3b-2be1309fd228","Type":"ContainerStarted","Data":"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962"} Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.900278 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-85d8575b45-h5ntt" podUID="f2652a34-9956-49b1-be3b-2be1309fd228" containerName="horizon" containerID="cri-o://a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0" gracePeriod=30 Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.900286 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-85d8575b45-h5ntt" podUID="f2652a34-9956-49b1-be3b-2be1309fd228" containerName="horizon-log" containerID="cri-o://0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962" gracePeriod=30 Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.902324 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1ff300e5-7a29-4ec7-974c-de163370f2f8","Type":"ContainerStarted","Data":"93a66cba32b2d1b66a701bf0aab46106eed47a49083e8433a4c9a13b6121a5cf"} Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.904554 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.904792 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d","Type":"ContainerDied","Data":"805d989ce5d2ddba87d41b9aef1fcfcc17a5ee8ca402f4b130b572f2149aae4e"} Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.904877 4861 scope.go:117] "RemoveContainer" containerID="5ae1c90c85b006c4ff066b1990e099f99eeec0dde3dd97e124951475df5bd1f6" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.908755 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f4b54cb6-vdr9c" event={"ID":"c569ed14-e911-4ead-ada8-270f32a1297f","Type":"ContainerStarted","Data":"28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec"} Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.908798 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f4b54cb6-vdr9c" event={"ID":"c569ed14-e911-4ead-ada8-270f32a1297f","Type":"ContainerStarted","Data":"eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027"} Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.930136 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-59b7bfd865-j6jn9" podStartSLOduration=2.577911681 podStartE2EDuration="10.930116785s" podCreationTimestamp="2026-01-29 08:16:35 +0000 UTC" firstStartedPulling="2026-01-29 08:16:36.687816843 +0000 UTC m=+6088.359311400" lastFinishedPulling="2026-01-29 08:16:45.040021947 +0000 UTC m=+6096.711516504" observedRunningTime="2026-01-29 08:16:45.909639686 +0000 UTC m=+6097.581134243" watchObservedRunningTime="2026-01-29 08:16:45.930116785 +0000 UTC m=+6097.601611342" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.937282 4861 scope.go:117] "RemoveContainer" containerID="1c665bc05a78b77d6a80786dbce31909183c32f4ca989b40fd63de4386b8a22b" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.939956 4861 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/horizon-596fc8cbb6-w86n9" podStartSLOduration=2.670457613 podStartE2EDuration="8.939933673s" podCreationTimestamp="2026-01-29 08:16:37 +0000 UTC" firstStartedPulling="2026-01-29 08:16:38.802198179 +0000 UTC m=+6090.473692736" lastFinishedPulling="2026-01-29 08:16:45.071674239 +0000 UTC m=+6096.743168796" observedRunningTime="2026-01-29 08:16:45.926581142 +0000 UTC m=+6097.598075709" watchObservedRunningTime="2026-01-29 08:16:45.939933673 +0000 UTC m=+6097.611428230" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.961514 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6f4b54cb6-vdr9c" podStartSLOduration=2.724139244 podStartE2EDuration="8.96149236s" podCreationTimestamp="2026-01-29 08:16:37 +0000 UTC" firstStartedPulling="2026-01-29 08:16:38.936963243 +0000 UTC m=+6090.608457800" lastFinishedPulling="2026-01-29 08:16:45.174316349 +0000 UTC m=+6096.845810916" observedRunningTime="2026-01-29 08:16:45.951106867 +0000 UTC m=+6097.622601424" watchObservedRunningTime="2026-01-29 08:16:45.96149236 +0000 UTC m=+6097.632986917" Jan 29 08:16:45 crc kubenswrapper[4861]: I0129 08:16:45.983062 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-85d8575b45-h5ntt" podStartSLOduration=2.7467163 podStartE2EDuration="10.983040186s" podCreationTimestamp="2026-01-29 08:16:35 +0000 UTC" firstStartedPulling="2026-01-29 08:16:36.845466239 +0000 UTC m=+6088.516960826" lastFinishedPulling="2026-01-29 08:16:45.081790125 +0000 UTC m=+6096.753284712" observedRunningTime="2026-01-29 08:16:45.974988325 +0000 UTC m=+6097.646482892" watchObservedRunningTime="2026-01-29 08:16:45.983040186 +0000 UTC m=+6097.654534743" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.012839 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.023607 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.044304 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:16:46 crc kubenswrapper[4861]: E0129 08:16:46.044804 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" containerName="glance-log" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.044823 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" containerName="glance-log" Jan 29 08:16:46 crc kubenswrapper[4861]: E0129 08:16:46.044837 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" containerName="glance-httpd" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.044843 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" containerName="glance-httpd" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.045041 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" containerName="glance-log" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.045062 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" containerName="glance-httpd" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.046158 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.048296 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.048453 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.049660 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.182719 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.231060 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsbdj\" (UniqueName: \"kubernetes.io/projected/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-kube-api-access-rsbdj\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.231195 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.231277 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-config-data\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.231304 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.231358 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-logs\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.231402 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-scripts\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.231432 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " 
pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.329628 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.332903 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsbdj\" (UniqueName: \"kubernetes.io/projected/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-kube-api-access-rsbdj\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.333055 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.333175 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-config-data\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.333452 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.334252 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-logs\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.334338 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-scripts\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.334379 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.334908 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.334962 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-logs\") pod 
\"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.341194 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-scripts\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.345575 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-config-data\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.359576 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.360087 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.363062 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsbdj\" (UniqueName: \"kubernetes.io/projected/9f7eb619-5ed2-4e32-a87a-ec61ac26bd98-kube-api-access-rsbdj\") pod \"glance-default-external-api-0\" (UID: \"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98\") " pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.366766 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.922770 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1ff300e5-7a29-4ec7-974c-de163370f2f8","Type":"ContainerStarted","Data":"8f34872bbab4edb6c0c00f2a04b4de673424411c1e473ce7d77802e21413988c"} Jan 29 08:16:46 crc kubenswrapper[4861]: I0129 08:16:46.960257 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 08:16:47 crc kubenswrapper[4861]: I0129 08:16:47.150387 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d" path="/var/lib/kubelet/pods/8c613aaf-cf4f-4ea4-b9ce-3d852bc9d00d/volumes" Jan 29 08:16:47 crc kubenswrapper[4861]: I0129 08:16:47.956441 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98","Type":"ContainerStarted","Data":"764e284abd456be9c3ddf435f7fa69adb5850e3f9f0766e3f585e425d4710be4"} Jan 29 08:16:47 crc kubenswrapper[4861]: I0129 08:16:47.956883 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98","Type":"ContainerStarted","Data":"7a5ced211bbb410fb38417fd1f58f9fa08c17c4b0f3fbb4f8a2e566f2fe65a3f"} Jan 29 08:16:47 crc kubenswrapper[4861]: I0129 08:16:47.964178 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1ff300e5-7a29-4ec7-974c-de163370f2f8","Type":"ContainerStarted","Data":"bac1e6ec563aacdf7c1dc123ea5e737053457f3b296aa04f0db6e1396c9acd3d"} Jan 29 08:16:47 crc kubenswrapper[4861]: I0129 08:16:47.995453 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.995434671 podStartE2EDuration="7.995434671s" podCreationTimestamp="2026-01-29 08:16:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:16:47.984404701 +0000 UTC m=+6099.655899268" watchObservedRunningTime="2026-01-29 08:16:47.995434671 +0000 UTC m=+6099.666929228" Jan 29 08:16:48 crc kubenswrapper[4861]: I0129 08:16:48.234836 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:48 crc kubenswrapper[4861]: I0129 08:16:48.235242 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:16:48 crc kubenswrapper[4861]: I0129 08:16:48.350659 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:48 crc kubenswrapper[4861]: I0129 08:16:48.350745 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:16:48 crc kubenswrapper[4861]: I0129 08:16:48.979766 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9f7eb619-5ed2-4e32-a87a-ec61ac26bd98","Type":"ContainerStarted","Data":"bc2a66105ab936e56c1d6a14783785311dc79f51c959e948e78c5c6f7e15f925"} Jan 29 08:16:49 crc kubenswrapper[4861]: I0129 08:16:49.005828 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.005807523 
podStartE2EDuration="4.005807523s" podCreationTimestamp="2026-01-29 08:16:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:16:48.998605253 +0000 UTC m=+6100.670099820" watchObservedRunningTime="2026-01-29 08:16:49.005807523 +0000 UTC m=+6100.677302080" Jan 29 08:16:50 crc kubenswrapper[4861]: I0129 08:16:50.537240 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:50 crc kubenswrapper[4861]: I0129 08:16:50.537615 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:50 crc kubenswrapper[4861]: I0129 08:16:50.571761 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:50 crc kubenswrapper[4861]: I0129 08:16:50.588282 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:50 crc kubenswrapper[4861]: I0129 08:16:50.995568 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:50 crc kubenswrapper[4861]: I0129 08:16:50.995889 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:53 crc kubenswrapper[4861]: I0129 08:16:53.962424 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:53 crc kubenswrapper[4861]: I0129 08:16:53.963450 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 08:16:56 crc kubenswrapper[4861]: I0129 08:16:56.367242 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 08:16:56 crc kubenswrapper[4861]: I0129 08:16:56.367592 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 08:16:56 crc kubenswrapper[4861]: I0129 08:16:56.408693 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 08:16:56 crc kubenswrapper[4861]: I0129 08:16:56.424506 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 08:16:57 crc kubenswrapper[4861]: I0129 08:16:57.075902 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 08:16:57 crc kubenswrapper[4861]: I0129 08:16:57.076241 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 08:16:58 crc kubenswrapper[4861]: I0129 08:16:58.237186 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-596fc8cbb6-w86n9" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.125:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.125:8443: connect: connection refused" Jan 29 08:16:58 crc kubenswrapper[4861]: I0129 08:16:58.352661 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6f4b54cb6-vdr9c" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon" probeResult="failure" 
output="Get \"https://10.217.1.126:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.126:8443: connect: connection refused" Jan 29 08:16:58 crc kubenswrapper[4861]: I0129 08:16:58.960681 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 08:16:59 crc kubenswrapper[4861]: I0129 08:16:59.015443 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.312400 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-59krq"] Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.316015 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.329647 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-59krq"] Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.406336 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-catalog-content\") pod \"redhat-operators-59krq\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.406444 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-utilities\") pod \"redhat-operators-59krq\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.406585 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zl779\" (UniqueName: \"kubernetes.io/projected/912bce4b-e172-4e29-81c1-6cf9fd4753ba-kube-api-access-zl779\") pod \"redhat-operators-59krq\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.508400 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-utilities\") pod \"redhat-operators-59krq\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.508464 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zl779\" (UniqueName: \"kubernetes.io/projected/912bce4b-e172-4e29-81c1-6cf9fd4753ba-kube-api-access-zl779\") pod \"redhat-operators-59krq\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.508612 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-catalog-content\") pod \"redhat-operators-59krq\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.509017 4861 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-utilities\") pod \"redhat-operators-59krq\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.509096 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-catalog-content\") pod \"redhat-operators-59krq\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.533169 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zl779\" (UniqueName: \"kubernetes.io/projected/912bce4b-e172-4e29-81c1-6cf9fd4753ba-kube-api-access-zl779\") pod \"redhat-operators-59krq\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:07 crc kubenswrapper[4861]: I0129 08:17:07.678354 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:08 crc kubenswrapper[4861]: I0129 08:17:08.192117 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-59krq"] Jan 29 08:17:08 crc kubenswrapper[4861]: I0129 08:17:08.203765 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-59krq" event={"ID":"912bce4b-e172-4e29-81c1-6cf9fd4753ba","Type":"ContainerStarted","Data":"afcc8088d74e07c672968aaabe8a37affb872e14f239f25986ee321cf170160f"} Jan 29 08:17:09 crc kubenswrapper[4861]: I0129 08:17:09.214880 4861 generic.go:334] "Generic (PLEG): container finished" podID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerID="5f0a41d5cd8db86bd32d2498b8dff05ee125f59df04602f72d0d362bd6da646f" exitCode=0 Jan 29 08:17:09 crc kubenswrapper[4861]: I0129 08:17:09.215014 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-59krq" event={"ID":"912bce4b-e172-4e29-81c1-6cf9fd4753ba","Type":"ContainerDied","Data":"5f0a41d5cd8db86bd32d2498b8dff05ee125f59df04602f72d0d362bd6da646f"} Jan 29 08:17:10 crc kubenswrapper[4861]: I0129 08:17:10.133671 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:17:10 crc kubenswrapper[4861]: I0129 08:17:10.184442 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-596fc8cbb6-w86n9" Jan 29 08:17:10 crc kubenswrapper[4861]: I0129 08:17:10.229884 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-59krq" event={"ID":"912bce4b-e172-4e29-81c1-6cf9fd4753ba","Type":"ContainerStarted","Data":"4986a34b5afca44e37725ce5594d8ad3779a0ef9e65be9a33d47ba1449c0bce7"} Jan 29 08:17:12 crc kubenswrapper[4861]: I0129 08:17:12.256238 4861 generic.go:334] "Generic (PLEG): container finished" podID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerID="4986a34b5afca44e37725ce5594d8ad3779a0ef9e65be9a33d47ba1449c0bce7" exitCode=0 Jan 29 08:17:12 crc kubenswrapper[4861]: I0129 08:17:12.256334 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-59krq" event={"ID":"912bce4b-e172-4e29-81c1-6cf9fd4753ba","Type":"ContainerDied","Data":"4986a34b5afca44e37725ce5594d8ad3779a0ef9e65be9a33d47ba1449c0bce7"} Jan 29 08:17:12 
crc kubenswrapper[4861]: I0129 08:17:12.329740 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:17:12 crc kubenswrapper[4861]: I0129 08:17:12.414601 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-596fc8cbb6-w86n9"] Jan 29 08:17:12 crc kubenswrapper[4861]: I0129 08:17:12.414819 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-596fc8cbb6-w86n9" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon-log" containerID="cri-o://2ba67d3155a237d00b4a4d82a5a33069c26f881a25c1ecbdaf40ae1307dc8476" gracePeriod=30 Jan 29 08:17:12 crc kubenswrapper[4861]: I0129 08:17:12.414924 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-596fc8cbb6-w86n9" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon" containerID="cri-o://f7b8d6ce47ddf73017a21e8efb18b334c894b3cd5ad286a54caa82252c0511fc" gracePeriod=30 Jan 29 08:17:12 crc kubenswrapper[4861]: I0129 08:17:12.422880 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-596fc8cbb6-w86n9" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.125:8443/dashboard/auth/login/?next=/dashboard/\": EOF" Jan 29 08:17:13 crc kubenswrapper[4861]: I0129 08:17:13.267522 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-59krq" event={"ID":"912bce4b-e172-4e29-81c1-6cf9fd4753ba","Type":"ContainerStarted","Data":"c40ed166c4cfef1a3d3565d9714911564ef62d0b27f702ba4c4e48738d9257c0"} Jan 29 08:17:13 crc kubenswrapper[4861]: I0129 08:17:13.298629 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-59krq" podStartSLOduration=2.667961192 podStartE2EDuration="6.298605464s" podCreationTimestamp="2026-01-29 08:17:07 +0000 UTC" firstStartedPulling="2026-01-29 08:17:09.217498265 +0000 UTC m=+6120.888992822" lastFinishedPulling="2026-01-29 08:17:12.848142527 +0000 UTC m=+6124.519637094" observedRunningTime="2026-01-29 08:17:13.289307149 +0000 UTC m=+6124.960801736" watchObservedRunningTime="2026-01-29 08:17:13.298605464 +0000 UTC m=+6124.970100031" Jan 29 08:17:16 crc kubenswrapper[4861]: I0129 08:17:16.317573 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-596fc8cbb6-w86n9" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.125:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:49066->10.217.1.125:8443: read: connection reset by peer" Jan 29 08:17:16 crc kubenswrapper[4861]: I0129 08:17:16.330159 4861 generic.go:334] "Generic (PLEG): container finished" podID="39084618-2c91-4f07-9f78-baea7f30b91c" containerID="e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767" exitCode=137 Jan 29 08:17:16 crc kubenswrapper[4861]: I0129 08:17:16.330199 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59b7bfd865-j6jn9" event={"ID":"39084618-2c91-4f07-9f78-baea7f30b91c","Type":"ContainerDied","Data":"e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767"} Jan 29 08:17:16 crc kubenswrapper[4861]: I0129 08:17:16.842989 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:17:16 crc kubenswrapper[4861]: I0129 08:17:16.850037 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85d8575b45-h5ntt" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.031706 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-scripts\") pod \"f2652a34-9956-49b1-be3b-2be1309fd228\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.032086 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-scripts\") pod \"39084618-2c91-4f07-9f78-baea7f30b91c\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.032258 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxzf2\" (UniqueName: \"kubernetes.io/projected/39084618-2c91-4f07-9f78-baea7f30b91c-kube-api-access-hxzf2\") pod \"39084618-2c91-4f07-9f78-baea7f30b91c\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.032361 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcgd2\" (UniqueName: \"kubernetes.io/projected/f2652a34-9956-49b1-be3b-2be1309fd228-kube-api-access-zcgd2\") pod \"f2652a34-9956-49b1-be3b-2be1309fd228\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.032434 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-config-data\") pod \"f2652a34-9956-49b1-be3b-2be1309fd228\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.032571 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39084618-2c91-4f07-9f78-baea7f30b91c-horizon-secret-key\") pod \"39084618-2c91-4f07-9f78-baea7f30b91c\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.032646 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2652a34-9956-49b1-be3b-2be1309fd228-logs\") pod \"f2652a34-9956-49b1-be3b-2be1309fd228\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.032741 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-config-data\") pod \"39084618-2c91-4f07-9f78-baea7f30b91c\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.033244 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f2652a34-9956-49b1-be3b-2be1309fd228-horizon-secret-key\") pod \"f2652a34-9956-49b1-be3b-2be1309fd228\" (UID: \"f2652a34-9956-49b1-be3b-2be1309fd228\") " Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.033377 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39084618-2c91-4f07-9f78-baea7f30b91c-logs\") pod \"39084618-2c91-4f07-9f78-baea7f30b91c\" (UID: \"39084618-2c91-4f07-9f78-baea7f30b91c\") " Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.033209 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2652a34-9956-49b1-be3b-2be1309fd228-logs" (OuterVolumeSpecName: "logs") pod "f2652a34-9956-49b1-be3b-2be1309fd228" (UID: "f2652a34-9956-49b1-be3b-2be1309fd228"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.034489 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2652a34-9956-49b1-be3b-2be1309fd228-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.034610 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39084618-2c91-4f07-9f78-baea7f30b91c-logs" (OuterVolumeSpecName: "logs") pod "39084618-2c91-4f07-9f78-baea7f30b91c" (UID: "39084618-2c91-4f07-9f78-baea7f30b91c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.038508 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39084618-2c91-4f07-9f78-baea7f30b91c-kube-api-access-hxzf2" (OuterVolumeSpecName: "kube-api-access-hxzf2") pod "39084618-2c91-4f07-9f78-baea7f30b91c" (UID: "39084618-2c91-4f07-9f78-baea7f30b91c"). InnerVolumeSpecName "kube-api-access-hxzf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.038611 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2652a34-9956-49b1-be3b-2be1309fd228-kube-api-access-zcgd2" (OuterVolumeSpecName: "kube-api-access-zcgd2") pod "f2652a34-9956-49b1-be3b-2be1309fd228" (UID: "f2652a34-9956-49b1-be3b-2be1309fd228"). InnerVolumeSpecName "kube-api-access-zcgd2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.038796 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2652a34-9956-49b1-be3b-2be1309fd228-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "f2652a34-9956-49b1-be3b-2be1309fd228" (UID: "f2652a34-9956-49b1-be3b-2be1309fd228"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.039234 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39084618-2c91-4f07-9f78-baea7f30b91c-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "39084618-2c91-4f07-9f78-baea7f30b91c" (UID: "39084618-2c91-4f07-9f78-baea7f30b91c"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.062427 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-config-data" (OuterVolumeSpecName: "config-data") pod "f2652a34-9956-49b1-be3b-2be1309fd228" (UID: "f2652a34-9956-49b1-be3b-2be1309fd228"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.064834 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-config-data" (OuterVolumeSpecName: "config-data") pod "39084618-2c91-4f07-9f78-baea7f30b91c" (UID: "39084618-2c91-4f07-9f78-baea7f30b91c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.065475 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-scripts" (OuterVolumeSpecName: "scripts") pod "39084618-2c91-4f07-9f78-baea7f30b91c" (UID: "39084618-2c91-4f07-9f78-baea7f30b91c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.066146 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-scripts" (OuterVolumeSpecName: "scripts") pod "f2652a34-9956-49b1-be3b-2be1309fd228" (UID: "f2652a34-9956-49b1-be3b-2be1309fd228"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.136873 4861 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/39084618-2c91-4f07-9f78-baea7f30b91c-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.136899 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.136930 4861 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f2652a34-9956-49b1-be3b-2be1309fd228-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.136939 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39084618-2c91-4f07-9f78-baea7f30b91c-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.136969 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.136977 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39084618-2c91-4f07-9f78-baea7f30b91c-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.136986 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxzf2\" (UniqueName: \"kubernetes.io/projected/39084618-2c91-4f07-9f78-baea7f30b91c-kube-api-access-hxzf2\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.136996 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f2652a34-9956-49b1-be3b-2be1309fd228-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.137007 4861 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-zcgd2\" (UniqueName: \"kubernetes.io/projected/f2652a34-9956-49b1-be3b-2be1309fd228-kube-api-access-zcgd2\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.344586 4861 generic.go:334] "Generic (PLEG): container finished" podID="39084618-2c91-4f07-9f78-baea7f30b91c" containerID="a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588" exitCode=137 Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.344658 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59b7bfd865-j6jn9" event={"ID":"39084618-2c91-4f07-9f78-baea7f30b91c","Type":"ContainerDied","Data":"a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588"} Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.345146 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-59b7bfd865-j6jn9" event={"ID":"39084618-2c91-4f07-9f78-baea7f30b91c","Type":"ContainerDied","Data":"624070b42a0f93970ae0a241fba0e42d340dd938856f5404ca07086891cd1267"} Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.345181 4861 scope.go:117] "RemoveContainer" containerID="a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.344701 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-59b7bfd865-j6jn9" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.352046 4861 generic.go:334] "Generic (PLEG): container finished" podID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerID="f7b8d6ce47ddf73017a21e8efb18b334c894b3cd5ad286a54caa82252c0511fc" exitCode=0 Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.352154 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-596fc8cbb6-w86n9" event={"ID":"ed85a66d-a2c3-445d-a911-8c7932fd9728","Type":"ContainerDied","Data":"f7b8d6ce47ddf73017a21e8efb18b334c894b3cd5ad286a54caa82252c0511fc"} Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.362668 4861 generic.go:334] "Generic (PLEG): container finished" podID="f2652a34-9956-49b1-be3b-2be1309fd228" containerID="a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0" exitCode=137 Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.363119 4861 generic.go:334] "Generic (PLEG): container finished" podID="f2652a34-9956-49b1-be3b-2be1309fd228" containerID="0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962" exitCode=137 Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.363035 4861 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.362995 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d8575b45-h5ntt" event={"ID":"f2652a34-9956-49b1-be3b-2be1309fd228","Type":"ContainerDied","Data":"a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0"}
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.363682 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d8575b45-h5ntt" event={"ID":"f2652a34-9956-49b1-be3b-2be1309fd228","Type":"ContainerDied","Data":"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962"}
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.363709 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85d8575b45-h5ntt" event={"ID":"f2652a34-9956-49b1-be3b-2be1309fd228","Type":"ContainerDied","Data":"bb96a2629d1b837b750f93def306960263319f6380c1b68272fc078cfca8f255"}
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.433275 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85d8575b45-h5ntt"]
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.442122 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-85d8575b45-h5ntt"]
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.460241 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-59b7bfd865-j6jn9"]
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.470334 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-59b7bfd865-j6jn9"]
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.574271 4861 scope.go:117] "RemoveContainer" containerID="e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.605566 4861 scope.go:117] "RemoveContainer" containerID="a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588"
Jan 29 08:17:17 crc kubenswrapper[4861]: E0129 08:17:17.607037 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588\": container with ID starting with a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588 not found: ID does not exist" containerID="a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.607125 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588"} err="failed to get container status \"a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588\": rpc error: code = NotFound desc = could not find container \"a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588\": container with ID starting with a79b974bf406c745c7ff0d0290203f3fdef4b4639bb391b5f422e4198d90e588 not found: ID does not exist"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.607156 4861 scope.go:117] "RemoveContainer" containerID="e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767"
Jan 29 08:17:17 crc kubenswrapper[4861]: E0129 08:17:17.607627 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767\": container with ID starting with e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767 not found: ID does not exist" containerID="e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.607660 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767"} err="failed to get container status \"e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767\": rpc error: code = NotFound desc = could not find container \"e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767\": container with ID starting with e3f7d66f7543fd98c5064ebe21f02613b8b8fde2fde636af93e815164c63c767 not found: ID does not exist"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.607679 4861 scope.go:117] "RemoveContainer" containerID="a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.686448 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-59krq"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.686872 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-59krq"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.790342 4861 scope.go:117] "RemoveContainer" containerID="0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.812311 4861 scope.go:117] "RemoveContainer" containerID="a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0"
Jan 29 08:17:17 crc kubenswrapper[4861]: E0129 08:17:17.812825 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0\": container with ID starting with a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0 not found: ID does not exist" containerID="a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.812884 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0"} err="failed to get container status \"a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0\": rpc error: code = NotFound desc = could not find container \"a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0\": container with ID starting with a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0 not found: ID does not exist"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.812919 4861 scope.go:117] "RemoveContainer" containerID="0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962"
Jan 29 08:17:17 crc kubenswrapper[4861]: E0129 08:17:17.813228 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962\": container with ID starting with 0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962 not found: ID does not exist" containerID="0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962"
Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.813263 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962"} err="failed to get container status \"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962\": rpc error: code = NotFound desc = could not find container \"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962\": container with ID starting with 0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962 not found: ID does not exist"
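The E-level "ContainerStatus from runtime service failed ... NotFound" records above are a benign race: the kubelet asks CRI-O to delete containers it has already pruned, and the second lookup finds the ID gone. A sketch that separates these NotFound races from other delete failures (Python 3, standard library; an illustrative helper that assumes the log is fed on stdin):

import sys
from collections import Counter

def classify_delete_errors(lines):
    """Tally 'DeleteContainer returned error' records: NotFound vs. other."""
    tally = Counter()
    for line in lines:
        if '"DeleteContainer returned error"' not in line:
            continue
        # NotFound means the container was already removed by the runtime:
        # an idempotent-delete race, not a lost container.
        kind = "not_found" if "code = NotFound" in line else "other"
        tally[kind] += 1
    return tally

if __name__ == "__main__":
    print(classify_delete_errors(sys.stdin))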
containerID={"Type":"cri-o","ID":"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962"} err="failed to get container status \"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962\": rpc error: code = NotFound desc = could not find container \"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962\": container with ID starting with 0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962 not found: ID does not exist" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.813287 4861 scope.go:117] "RemoveContainer" containerID="a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.813486 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0"} err="failed to get container status \"a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0\": rpc error: code = NotFound desc = could not find container \"a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0\": container with ID starting with a6ad6863e5a981b7766bc794582ef1f19f82aef28a2ad0c8aac2c4645ad19aa0 not found: ID does not exist" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.813509 4861 scope.go:117] "RemoveContainer" containerID="0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962" Jan 29 08:17:17 crc kubenswrapper[4861]: I0129 08:17:17.813689 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962"} err="failed to get container status \"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962\": rpc error: code = NotFound desc = could not find container \"0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962\": container with ID starting with 0b4cb85cb685220ae5c44681c6743a088a6b348f8b5f945a0365b778f2276962 not found: ID does not exist" Jan 29 08:17:18 crc kubenswrapper[4861]: I0129 08:17:18.234570 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-596fc8cbb6-w86n9" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.125:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.125:8443: connect: connection refused" Jan 29 08:17:18 crc kubenswrapper[4861]: I0129 08:17:18.743437 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-59krq" podUID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerName="registry-server" probeResult="failure" output=< Jan 29 08:17:18 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 08:17:18 crc kubenswrapper[4861]: > Jan 29 08:17:19 crc kubenswrapper[4861]: I0129 08:17:19.129681 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39084618-2c91-4f07-9f78-baea7f30b91c" path="/var/lib/kubelet/pods/39084618-2c91-4f07-9f78-baea7f30b91c/volumes" Jan 29 08:17:19 crc kubenswrapper[4861]: I0129 08:17:19.130974 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2652a34-9956-49b1-be3b-2be1309fd228" path="/var/lib/kubelet/pods/f2652a34-9956-49b1-be3b-2be1309fd228/volumes" Jan 29 08:17:26 crc kubenswrapper[4861]: I0129 08:17:26.074787 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-84ef-account-create-update-ngb6s"] Jan 29 08:17:26 crc kubenswrapper[4861]: I0129 08:17:26.090891 
4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-nq84p"] Jan 29 08:17:26 crc kubenswrapper[4861]: I0129 08:17:26.106374 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-84ef-account-create-update-ngb6s"] Jan 29 08:17:26 crc kubenswrapper[4861]: I0129 08:17:26.149109 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-nq84p"] Jan 29 08:17:27 crc kubenswrapper[4861]: I0129 08:17:27.134343 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bee00f6c-5570-42f4-9594-543e911751fe" path="/var/lib/kubelet/pods/bee00f6c-5570-42f4-9594-543e911751fe/volumes" Jan 29 08:17:27 crc kubenswrapper[4861]: I0129 08:17:27.136634 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d76ada8d-3b62-4dc2-94f9-af552c0e4e1c" path="/var/lib/kubelet/pods/d76ada8d-3b62-4dc2-94f9-af552c0e4e1c/volumes" Jan 29 08:17:27 crc kubenswrapper[4861]: I0129 08:17:27.610475 4861 scope.go:117] "RemoveContainer" containerID="88b1ca20977e144895268125e1f641464b96086df8bc54bfccd8ea407dbd2ed5" Jan 29 08:17:27 crc kubenswrapper[4861]: I0129 08:17:27.656341 4861 scope.go:117] "RemoveContainer" containerID="df36bfb544ce387d3f722be4b0fdd07d6a5a55b91e45f6b42941a822fd332436" Jan 29 08:17:27 crc kubenswrapper[4861]: I0129 08:17:27.722392 4861 scope.go:117] "RemoveContainer" containerID="97c71a1a99e929acc028573e1bffd3753005ca278e2887b379a03d0ebc25e18a" Jan 29 08:17:27 crc kubenswrapper[4861]: I0129 08:17:27.766285 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:27 crc kubenswrapper[4861]: I0129 08:17:27.852958 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:28 crc kubenswrapper[4861]: I0129 08:17:28.007696 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-59krq"] Jan 29 08:17:28 crc kubenswrapper[4861]: I0129 08:17:28.235024 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-596fc8cbb6-w86n9" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.125:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.125:8443: connect: connection refused" Jan 29 08:17:29 crc kubenswrapper[4861]: I0129 08:17:29.497232 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-59krq" podUID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerName="registry-server" containerID="cri-o://c40ed166c4cfef1a3d3565d9714911564ef62d0b27f702ba4c4e48738d9257c0" gracePeriod=2 Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.507776 4861 generic.go:334] "Generic (PLEG): container finished" podID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerID="c40ed166c4cfef1a3d3565d9714911564ef62d0b27f702ba4c4e48738d9257c0" exitCode=0 Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.507877 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-59krq" event={"ID":"912bce4b-e172-4e29-81c1-6cf9fd4753ba","Type":"ContainerDied","Data":"c40ed166c4cfef1a3d3565d9714911564ef62d0b27f702ba4c4e48738d9257c0"} Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.508154 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-59krq" 
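When probe output spans several lines, as in the redhat-operators-59krq startup-probe failure above, klog emits it as an output=< ... > block and journald re-prefixes every continuation line. A sketch that stitches such blocks back into single records (Python 3, standard library; the prefix regex assumes the "Jan 29 08:17:18 crc kubenswrapper[4861]: " form seen here):

import re
import sys

# journald prefix on every line: "Jan 29 08:17:18 crc kubenswrapper[4861]: "
PREFIX = re.compile(r'^\w{3} \d{2} [\d:]{8} \S+ kubenswrapper\[\d+\]: ')

def probe_failures(lines):
    """Yield each 'Probe failed' record, re-joining output=< ... > blocks."""
    block = None
    for line in lines:
        body = PREFIX.sub("", line.rstrip("\n"))
        if block is None:
            if '"Probe failed"' in body and body.endswith("output=<"):
                block = [body]          # multi-line output follows
            elif '"Probe failed"' in body:
                yield body              # single-line failure
        else:
            block.append(body)
            if body == ">":             # closing delimiter of the block
                yield "\n".join(block)
                block = None

if __name__ == "__main__":
    for failure in probe_failures(sys.stdin):
        print(failure, end="\n\n")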
event={"ID":"912bce4b-e172-4e29-81c1-6cf9fd4753ba","Type":"ContainerDied","Data":"afcc8088d74e07c672968aaabe8a37affb872e14f239f25986ee321cf170160f"} Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.508170 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afcc8088d74e07c672968aaabe8a37affb872e14f239f25986ee321cf170160f" Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.584544 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.629776 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.629893 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.677017 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zl779\" (UniqueName: \"kubernetes.io/projected/912bce4b-e172-4e29-81c1-6cf9fd4753ba-kube-api-access-zl779\") pod \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.677141 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-catalog-content\") pod \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.677288 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-utilities\") pod \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\" (UID: \"912bce4b-e172-4e29-81c1-6cf9fd4753ba\") " Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.677970 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-utilities" (OuterVolumeSpecName: "utilities") pod "912bce4b-e172-4e29-81c1-6cf9fd4753ba" (UID: "912bce4b-e172-4e29-81c1-6cf9fd4753ba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.682264 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/912bce4b-e172-4e29-81c1-6cf9fd4753ba-kube-api-access-zl779" (OuterVolumeSpecName: "kube-api-access-zl779") pod "912bce4b-e172-4e29-81c1-6cf9fd4753ba" (UID: "912bce4b-e172-4e29-81c1-6cf9fd4753ba"). InnerVolumeSpecName "kube-api-access-zl779". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.779966 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.780024 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zl779\" (UniqueName: \"kubernetes.io/projected/912bce4b-e172-4e29-81c1-6cf9fd4753ba-kube-api-access-zl779\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.826649 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "912bce4b-e172-4e29-81c1-6cf9fd4753ba" (UID: "912bce4b-e172-4e29-81c1-6cf9fd4753ba"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:17:30 crc kubenswrapper[4861]: I0129 08:17:30.881805 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/912bce4b-e172-4e29-81c1-6cf9fd4753ba-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:31 crc kubenswrapper[4861]: I0129 08:17:31.518811 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-59krq" Jan 29 08:17:31 crc kubenswrapper[4861]: I0129 08:17:31.554241 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-59krq"] Jan 29 08:17:31 crc kubenswrapper[4861]: I0129 08:17:31.569998 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-59krq"] Jan 29 08:17:33 crc kubenswrapper[4861]: I0129 08:17:33.129769 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" path="/var/lib/kubelet/pods/912bce4b-e172-4e29-81c1-6cf9fd4753ba/volumes" Jan 29 08:17:34 crc kubenswrapper[4861]: I0129 08:17:34.069662 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-q7c5w"] Jan 29 08:17:34 crc kubenswrapper[4861]: I0129 08:17:34.082159 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-q7c5w"] Jan 29 08:17:35 crc kubenswrapper[4861]: I0129 08:17:35.142027 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83194fd3-8714-4812-99c5-5e3870600345" path="/var/lib/kubelet/pods/83194fd3-8714-4812-99c5-5e3870600345/volumes" Jan 29 08:17:36 crc kubenswrapper[4861]: E0129 08:17:36.687334 4861 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/9d5074aa519d17acdc8ae843222aae5b9146fdf44009fb5dd89635455f68858d/diff" to get inode usage: stat /var/lib/containers/storage/overlay/9d5074aa519d17acdc8ae843222aae5b9146fdf44009fb5dd89635455f68858d/diff: no such file or directory, extraDiskErr: Jan 29 08:17:38 crc kubenswrapper[4861]: I0129 08:17:38.235366 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-596fc8cbb6-w86n9" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.125:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.125:8443: connect: connection refused" Jan 29 08:17:42 crc kubenswrapper[4861]: I0129 08:17:42.659971 4861 
Jan 29 08:17:42 crc kubenswrapper[4861]: I0129 08:17:42.660661 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-596fc8cbb6-w86n9" event={"ID":"ed85a66d-a2c3-445d-a911-8c7932fd9728","Type":"ContainerDied","Data":"2ba67d3155a237d00b4a4d82a5a33069c26f881a25c1ecbdaf40ae1307dc8476"}
Jan 29 08:17:42 crc kubenswrapper[4861]: I0129 08:17:42.975589 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-596fc8cbb6-w86n9"
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.109802 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-scripts\") pod \"ed85a66d-a2c3-445d-a911-8c7932fd9728\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") "
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.109883 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-secret-key\") pod \"ed85a66d-a2c3-445d-a911-8c7932fd9728\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") "
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.109917 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zp4j4\" (UniqueName: \"kubernetes.io/projected/ed85a66d-a2c3-445d-a911-8c7932fd9728-kube-api-access-zp4j4\") pod \"ed85a66d-a2c3-445d-a911-8c7932fd9728\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") "
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.109984 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-config-data\") pod \"ed85a66d-a2c3-445d-a911-8c7932fd9728\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") "
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.110041 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed85a66d-a2c3-445d-a911-8c7932fd9728-logs\") pod \"ed85a66d-a2c3-445d-a911-8c7932fd9728\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") "
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.110171 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-tls-certs\") pod \"ed85a66d-a2c3-445d-a911-8c7932fd9728\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") "
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.110253 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-combined-ca-bundle\") pod \"ed85a66d-a2c3-445d-a911-8c7932fd9728\" (UID: \"ed85a66d-a2c3-445d-a911-8c7932fd9728\") "
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.113222 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed85a66d-a2c3-445d-a911-8c7932fd9728-logs" (OuterVolumeSpecName: "logs") pod "ed85a66d-a2c3-445d-a911-8c7932fd9728" (UID: "ed85a66d-a2c3-445d-a911-8c7932fd9728"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.119376 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed85a66d-a2c3-445d-a911-8c7932fd9728-kube-api-access-zp4j4" (OuterVolumeSpecName: "kube-api-access-zp4j4") pod "ed85a66d-a2c3-445d-a911-8c7932fd9728" (UID: "ed85a66d-a2c3-445d-a911-8c7932fd9728"). InnerVolumeSpecName "kube-api-access-zp4j4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.120372 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "ed85a66d-a2c3-445d-a911-8c7932fd9728" (UID: "ed85a66d-a2c3-445d-a911-8c7932fd9728"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.142193 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed85a66d-a2c3-445d-a911-8c7932fd9728" (UID: "ed85a66d-a2c3-445d-a911-8c7932fd9728"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.144911 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-config-data" (OuterVolumeSpecName: "config-data") pod "ed85a66d-a2c3-445d-a911-8c7932fd9728" (UID: "ed85a66d-a2c3-445d-a911-8c7932fd9728"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.160021 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-scripts" (OuterVolumeSpecName: "scripts") pod "ed85a66d-a2c3-445d-a911-8c7932fd9728" (UID: "ed85a66d-a2c3-445d-a911-8c7932fd9728"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.178345 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "ed85a66d-a2c3-445d-a911-8c7932fd9728" (UID: "ed85a66d-a2c3-445d-a911-8c7932fd9728"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.215004 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.215038 4861 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.215051 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zp4j4\" (UniqueName: \"kubernetes.io/projected/ed85a66d-a2c3-445d-a911-8c7932fd9728-kube-api-access-zp4j4\") on node \"crc\" DevicePath \"\""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.215060 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ed85a66d-a2c3-445d-a911-8c7932fd9728-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.215098 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed85a66d-a2c3-445d-a911-8c7932fd9728-logs\") on node \"crc\" DevicePath \"\""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.215109 4861 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-horizon-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.215118 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed85a66d-a2c3-445d-a911-8c7932fd9728-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.678577 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-596fc8cbb6-w86n9" event={"ID":"ed85a66d-a2c3-445d-a911-8c7932fd9728","Type":"ContainerDied","Data":"69dce663aad793a4ad423cd13bf98345f2e5c6975395bb77147133f3eeb5b8c2"}
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.679036 4861 scope.go:117] "RemoveContainer" containerID="f7b8d6ce47ddf73017a21e8efb18b334c894b3cd5ad286a54caa82252c0511fc"
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.679294 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-596fc8cbb6-w86n9"
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.741215 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-596fc8cbb6-w86n9"]
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.753827 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-596fc8cbb6-w86n9"]
Jan 29 08:17:43 crc kubenswrapper[4861]: I0129 08:17:43.899767 4861 scope.go:117] "RemoveContainer" containerID="2ba67d3155a237d00b4a4d82a5a33069c26f881a25c1ecbdaf40ae1307dc8476"
Jan 29 08:17:45 crc kubenswrapper[4861]: I0129 08:17:45.133297 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" path="/var/lib/kubelet/pods/ed85a66d-a2c3-445d-a911-8c7932fd9728/volumes"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.376637 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-65994449bb-mb4xq"]
Jan 29 08:17:52 crc kubenswrapper[4861]: E0129 08:17:52.377467 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39084618-2c91-4f07-9f78-baea7f30b91c" containerName="horizon-log"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377481 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="39084618-2c91-4f07-9f78-baea7f30b91c" containerName="horizon-log"
Jan 29 08:17:52 crc kubenswrapper[4861]: E0129 08:17:52.377495 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerName="registry-server"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377501 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerName="registry-server"
Jan 29 08:17:52 crc kubenswrapper[4861]: E0129 08:17:52.377511 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon-log"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377517 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon-log"
Jan 29 08:17:52 crc kubenswrapper[4861]: E0129 08:17:52.377533 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39084618-2c91-4f07-9f78-baea7f30b91c" containerName="horizon"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377560 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="39084618-2c91-4f07-9f78-baea7f30b91c" containerName="horizon"
Jan 29 08:17:52 crc kubenswrapper[4861]: E0129 08:17:52.377574 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerName="extract-utilities"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377581 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerName="extract-utilities"
Jan 29 08:17:52 crc kubenswrapper[4861]: E0129 08:17:52.377589 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2652a34-9956-49b1-be3b-2be1309fd228" containerName="horizon"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377594 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2652a34-9956-49b1-be3b-2be1309fd228" containerName="horizon"
Jan 29 08:17:52 crc kubenswrapper[4861]: E0129 08:17:52.377614 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2652a34-9956-49b1-be3b-2be1309fd228" containerName="horizon-log"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377619 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2652a34-9956-49b1-be3b-2be1309fd228" containerName="horizon-log"
Jan 29 08:17:52 crc kubenswrapper[4861]: E0129 08:17:52.377627 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerName="extract-content"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377632 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerName="extract-content"
Jan 29 08:17:52 crc kubenswrapper[4861]: E0129 08:17:52.377645 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377651 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377801 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2652a34-9956-49b1-be3b-2be1309fd228" containerName="horizon"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377815 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377821 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="912bce4b-e172-4e29-81c1-6cf9fd4753ba" containerName="registry-server"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377832 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="39084618-2c91-4f07-9f78-baea7f30b91c" containerName="horizon"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377844 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed85a66d-a2c3-445d-a911-8c7932fd9728" containerName="horizon-log"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377861 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2652a34-9956-49b1-be3b-2be1309fd228" containerName="horizon-log"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.377872 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="39084618-2c91-4f07-9f78-baea7f30b91c" containerName="horizon-log"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.378880 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-65994449bb-mb4xq"
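Despite the E log level, the cpu_manager/memory_manager records above are housekeeping rather than failures: admitting horizon-65994449bb-mb4xq triggers removal of CPU-set and memory-state checkpoints left behind by the pods deleted earlier. A sketch that collects the stale (podUID, containerName) pairs (Python 3, standard library; an illustrative helper):

import re
import sys

# Matches both forms above:
#   cpu_manager.go:410]  "RemoveStaleState: removing container" podUID=...
#   memory_manager.go:354] "RemoveStaleState removing state" podUID=...
STALE = re.compile(
    r'"RemoveStaleState[^"]*" '
    r'podUID="(?P<pod>[^"]+)" containerName="(?P<name>[^"]+)"'
)

def stale_pairs(lines):
    """Return the set of (podUID, containerName) pairs being cleaned up."""
    seen = set()
    for line in lines:
        for m in STALE.finditer(line):
            seen.add((m["pod"], m["name"]))
    return seen

if __name__ == "__main__":
    for pod, name in sorted(stale_pairs(sys.stdin)):
        print(pod, name)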
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.391310 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-65994449bb-mb4xq"]
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.539031 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f338e1be-fc4b-4c1b-b08f-456303eef9bc-combined-ca-bundle\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.539768 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f338e1be-fc4b-4c1b-b08f-456303eef9bc-config-data\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.539803 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/f338e1be-fc4b-4c1b-b08f-456303eef9bc-horizon-tls-certs\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.540329 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjwkj\" (UniqueName: \"kubernetes.io/projected/f338e1be-fc4b-4c1b-b08f-456303eef9bc-kube-api-access-zjwkj\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.540486 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f338e1be-fc4b-4c1b-b08f-456303eef9bc-horizon-secret-key\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.540558 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f338e1be-fc4b-4c1b-b08f-456303eef9bc-scripts\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.540657 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f338e1be-fc4b-4c1b-b08f-456303eef9bc-logs\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.643241 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjwkj\" (UniqueName: \"kubernetes.io/projected/f338e1be-fc4b-4c1b-b08f-456303eef9bc-kube-api-access-zjwkj\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.643350 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f338e1be-fc4b-4c1b-b08f-456303eef9bc-horizon-secret-key\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.643397 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f338e1be-fc4b-4c1b-b08f-456303eef9bc-scripts\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.643452 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f338e1be-fc4b-4c1b-b08f-456303eef9bc-logs\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.643539 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f338e1be-fc4b-4c1b-b08f-456303eef9bc-combined-ca-bundle\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.643613 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f338e1be-fc4b-4c1b-b08f-456303eef9bc-config-data\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.644128 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f338e1be-fc4b-4c1b-b08f-456303eef9bc-logs\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.654272 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/f338e1be-fc4b-4c1b-b08f-456303eef9bc-horizon-tls-certs\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.654942 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f338e1be-fc4b-4c1b-b08f-456303eef9bc-scripts\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.656125 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f338e1be-fc4b-4c1b-b08f-456303eef9bc-config-data\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.658740 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f338e1be-fc4b-4c1b-b08f-456303eef9bc-combined-ca-bundle\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.658827 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/f338e1be-fc4b-4c1b-b08f-456303eef9bc-horizon-tls-certs\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.667013 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjwkj\" (UniqueName: \"kubernetes.io/projected/f338e1be-fc4b-4c1b-b08f-456303eef9bc-kube-api-access-zjwkj\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.667129 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f338e1be-fc4b-4c1b-b08f-456303eef9bc-horizon-secret-key\") pod \"horizon-65994449bb-mb4xq\" (UID: \"f338e1be-fc4b-4c1b-b08f-456303eef9bc\") " pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:52 crc kubenswrapper[4861]: I0129 08:17:52.710615 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:17:53 crc kubenswrapper[4861]: W0129 08:17:53.228656 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf338e1be_fc4b_4c1b_b08f_456303eef9bc.slice/crio-1b46dce5e3dc60c119b3569e28e54c71be714e7c7ca4c1a0840fac56cb86ee62 WatchSource:0}: Error finding container 1b46dce5e3dc60c119b3569e28e54c71be714e7c7ca4c1a0840fac56cb86ee62: Status 404 returned error can't find the container with id 1b46dce5e3dc60c119b3569e28e54c71be714e7c7ca4c1a0840fac56cb86ee62
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.240099 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-65994449bb-mb4xq"]
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.569092 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-9sql6"]
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.570914 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-9sql6"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.572913 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2825b4e3-4159-492d-87f0-90fc88b8c345-operator-scripts\") pod \"heat-db-create-9sql6\" (UID: \"2825b4e3-4159-492d-87f0-90fc88b8c345\") " pod="openstack/heat-db-create-9sql6"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.573153 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqqzd\" (UniqueName: \"kubernetes.io/projected/2825b4e3-4159-492d-87f0-90fc88b8c345-kube-api-access-fqqzd\") pod \"heat-db-create-9sql6\" (UID: \"2825b4e3-4159-492d-87f0-90fc88b8c345\") " pod="openstack/heat-db-create-9sql6"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.578811 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-9sql6"]
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.667321 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-7ed2-account-create-update-98kvj"]
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.668754 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-7ed2-account-create-update-98kvj"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.671626 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.674764 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-operator-scripts\") pod \"heat-7ed2-account-create-update-98kvj\" (UID: \"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2\") " pod="openstack/heat-7ed2-account-create-update-98kvj"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.674908 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqqzd\" (UniqueName: \"kubernetes.io/projected/2825b4e3-4159-492d-87f0-90fc88b8c345-kube-api-access-fqqzd\") pod \"heat-db-create-9sql6\" (UID: \"2825b4e3-4159-492d-87f0-90fc88b8c345\") " pod="openstack/heat-db-create-9sql6"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.674977 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pqvh\" (UniqueName: \"kubernetes.io/projected/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-kube-api-access-7pqvh\") pod \"heat-7ed2-account-create-update-98kvj\" (UID: \"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2\") " pod="openstack/heat-7ed2-account-create-update-98kvj"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.675035 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2825b4e3-4159-492d-87f0-90fc88b8c345-operator-scripts\") pod \"heat-db-create-9sql6\" (UID: \"2825b4e3-4159-492d-87f0-90fc88b8c345\") " pod="openstack/heat-db-create-9sql6"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.675997 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2825b4e3-4159-492d-87f0-90fc88b8c345-operator-scripts\") pod \"heat-db-create-9sql6\" (UID: \"2825b4e3-4159-492d-87f0-90fc88b8c345\") " pod="openstack/heat-db-create-9sql6"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.695350 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-7ed2-account-create-update-98kvj"]
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.698899 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqqzd\" (UniqueName: \"kubernetes.io/projected/2825b4e3-4159-492d-87f0-90fc88b8c345-kube-api-access-fqqzd\") pod \"heat-db-create-9sql6\" (UID: \"2825b4e3-4159-492d-87f0-90fc88b8c345\") " pod="openstack/heat-db-create-9sql6"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.778372 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-operator-scripts\") pod \"heat-7ed2-account-create-update-98kvj\" (UID: \"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2\") " pod="openstack/heat-7ed2-account-create-update-98kvj"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.778796 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-operator-scripts\") pod \"heat-7ed2-account-create-update-98kvj\" (UID: \"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2\") " pod="openstack/heat-7ed2-account-create-update-98kvj"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.779033 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pqvh\" (UniqueName: \"kubernetes.io/projected/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-kube-api-access-7pqvh\") pod \"heat-7ed2-account-create-update-98kvj\" (UID: \"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2\") " pod="openstack/heat-7ed2-account-create-update-98kvj"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.781912 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-65994449bb-mb4xq" event={"ID":"f338e1be-fc4b-4c1b-b08f-456303eef9bc","Type":"ContainerStarted","Data":"788a853124f42684aed21724fa2064bc7da4634517ebe7d8d82d9f305835f826"}
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.781946 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-65994449bb-mb4xq" event={"ID":"f338e1be-fc4b-4c1b-b08f-456303eef9bc","Type":"ContainerStarted","Data":"21becdb94a62a70eb9c2d72df731ac3c2da09b122dd7779ef5149aff91c56e8e"}
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.781959 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-65994449bb-mb4xq" event={"ID":"f338e1be-fc4b-4c1b-b08f-456303eef9bc","Type":"ContainerStarted","Data":"1b46dce5e3dc60c119b3569e28e54c71be714e7c7ca4c1a0840fac56cb86ee62"}
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.812684 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-65994449bb-mb4xq" podStartSLOduration=1.812658085 podStartE2EDuration="1.812658085s" podCreationTimestamp="2026-01-29 08:17:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:17:53.802669352 +0000 UTC m=+6165.474163909" watchObservedRunningTime="2026-01-29 08:17:53.812658085 +0000 UTC m=+6165.484152642"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.822694 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pqvh\" (UniqueName: \"kubernetes.io/projected/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-kube-api-access-7pqvh\") pod \"heat-7ed2-account-create-update-98kvj\" (UID: \"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2\") " pod="openstack/heat-7ed2-account-create-update-98kvj"
Jan 29 08:17:53 crc kubenswrapper[4861]: I0129 08:17:53.902596 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-9sql6"
Jan 29 08:17:54 crc kubenswrapper[4861]: I0129 08:17:54.059412 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-7ed2-account-create-update-98kvj"
Jan 29 08:17:54 crc kubenswrapper[4861]: W0129 08:17:54.373640 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2825b4e3_4159_492d_87f0_90fc88b8c345.slice/crio-3f4949f851c1dc28e604a007ac2ee409f220d4849fa7578d49ca0208901f4cb6 WatchSource:0}: Error finding container 3f4949f851c1dc28e604a007ac2ee409f220d4849fa7578d49ca0208901f4cb6: Status 404 returned error can't find the container with id 3f4949f851c1dc28e604a007ac2ee409f220d4849fa7578d49ca0208901f4cb6
Jan 29 08:17:54 crc kubenswrapper[4861]: I0129 08:17:54.374010 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-9sql6"]
Jan 29 08:17:54 crc kubenswrapper[4861]: I0129 08:17:54.535068 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-7ed2-account-create-update-98kvj"]
Jan 29 08:17:54 crc kubenswrapper[4861]: W0129 08:17:54.538582 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfcbbac74_0f1c_4d78_a3be_7a6c25933bd2.slice/crio-817c386123924975783bf2401f9b2f19d791ac464eed7ae1e183d351c561d71b WatchSource:0}: Error finding container 817c386123924975783bf2401f9b2f19d791ac464eed7ae1e183d351c561d71b: Status 404 returned error can't find the container with id 817c386123924975783bf2401f9b2f19d791ac464eed7ae1e183d351c561d71b
Jan 29 08:17:54 crc kubenswrapper[4861]: I0129 08:17:54.797154 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-7ed2-account-create-update-98kvj" event={"ID":"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2","Type":"ContainerStarted","Data":"32586d795f9a74c4b8d577a6391b61598e554c87d4c33ddc7ab5325084a00786"}
Jan 29 08:17:54 crc kubenswrapper[4861]: I0129 08:17:54.797212 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-7ed2-account-create-update-98kvj" event={"ID":"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2","Type":"ContainerStarted","Data":"817c386123924975783bf2401f9b2f19d791ac464eed7ae1e183d351c561d71b"}
Jan 29 08:17:54 crc kubenswrapper[4861]: I0129 08:17:54.800503 4861 generic.go:334] "Generic (PLEG): container finished" podID="2825b4e3-4159-492d-87f0-90fc88b8c345" containerID="3c71dc347553eb11e9289703482de50a5c3af907fd475bace6095a34dc7fac58" exitCode=0
Jan 29 08:17:54 crc kubenswrapper[4861]: I0129 08:17:54.801264 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-9sql6" event={"ID":"2825b4e3-4159-492d-87f0-90fc88b8c345","Type":"ContainerDied","Data":"3c71dc347553eb11e9289703482de50a5c3af907fd475bace6095a34dc7fac58"}
Jan 29 08:17:54 crc kubenswrapper[4861]: I0129 08:17:54.801331 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-9sql6" event={"ID":"2825b4e3-4159-492d-87f0-90fc88b8c345","Type":"ContainerStarted","Data":"3f4949f851c1dc28e604a007ac2ee409f220d4849fa7578d49ca0208901f4cb6"}
Jan 29 08:17:54 crc kubenswrapper[4861]: I0129 08:17:54.823614 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-7ed2-account-create-update-98kvj" podStartSLOduration=1.823594301 podStartE2EDuration="1.823594301s" podCreationTimestamp="2026-01-29 08:17:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:17:54.819492714 +0000 UTC m=+6166.490987291" watchObservedRunningTime="2026-01-29 08:17:54.823594301 +0000 UTC m=+6166.495088848"
pod startup duration" pod="openstack/heat-7ed2-account-create-update-98kvj" podStartSLOduration=1.823594301 podStartE2EDuration="1.823594301s" podCreationTimestamp="2026-01-29 08:17:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:17:54.819492714 +0000 UTC m=+6166.490987291" watchObservedRunningTime="2026-01-29 08:17:54.823594301 +0000 UTC m=+6166.495088848" Jan 29 08:17:55 crc kubenswrapper[4861]: I0129 08:17:55.812693 4861 generic.go:334] "Generic (PLEG): container finished" podID="fcbbac74-0f1c-4d78-a3be-7a6c25933bd2" containerID="32586d795f9a74c4b8d577a6391b61598e554c87d4c33ddc7ab5325084a00786" exitCode=0 Jan 29 08:17:55 crc kubenswrapper[4861]: I0129 08:17:55.812764 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-7ed2-account-create-update-98kvj" event={"ID":"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2","Type":"ContainerDied","Data":"32586d795f9a74c4b8d577a6391b61598e554c87d4c33ddc7ab5325084a00786"} Jan 29 08:17:56 crc kubenswrapper[4861]: I0129 08:17:56.194275 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-9sql6" Jan 29 08:17:56 crc kubenswrapper[4861]: I0129 08:17:56.238440 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqqzd\" (UniqueName: \"kubernetes.io/projected/2825b4e3-4159-492d-87f0-90fc88b8c345-kube-api-access-fqqzd\") pod \"2825b4e3-4159-492d-87f0-90fc88b8c345\" (UID: \"2825b4e3-4159-492d-87f0-90fc88b8c345\") " Jan 29 08:17:56 crc kubenswrapper[4861]: I0129 08:17:56.238741 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2825b4e3-4159-492d-87f0-90fc88b8c345-operator-scripts\") pod \"2825b4e3-4159-492d-87f0-90fc88b8c345\" (UID: \"2825b4e3-4159-492d-87f0-90fc88b8c345\") " Jan 29 08:17:56 crc kubenswrapper[4861]: I0129 08:17:56.239437 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2825b4e3-4159-492d-87f0-90fc88b8c345-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2825b4e3-4159-492d-87f0-90fc88b8c345" (UID: "2825b4e3-4159-492d-87f0-90fc88b8c345"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:17:56 crc kubenswrapper[4861]: I0129 08:17:56.247327 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2825b4e3-4159-492d-87f0-90fc88b8c345-kube-api-access-fqqzd" (OuterVolumeSpecName: "kube-api-access-fqqzd") pod "2825b4e3-4159-492d-87f0-90fc88b8c345" (UID: "2825b4e3-4159-492d-87f0-90fc88b8c345"). InnerVolumeSpecName "kube-api-access-fqqzd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:17:56 crc kubenswrapper[4861]: I0129 08:17:56.346307 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqqzd\" (UniqueName: \"kubernetes.io/projected/2825b4e3-4159-492d-87f0-90fc88b8c345-kube-api-access-fqqzd\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:56 crc kubenswrapper[4861]: I0129 08:17:56.346367 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2825b4e3-4159-492d-87f0-90fc88b8c345-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:56 crc kubenswrapper[4861]: I0129 08:17:56.831869 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-9sql6" event={"ID":"2825b4e3-4159-492d-87f0-90fc88b8c345","Type":"ContainerDied","Data":"3f4949f851c1dc28e604a007ac2ee409f220d4849fa7578d49ca0208901f4cb6"} Jan 29 08:17:56 crc kubenswrapper[4861]: I0129 08:17:56.831919 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f4949f851c1dc28e604a007ac2ee409f220d4849fa7578d49ca0208901f4cb6" Jan 29 08:17:56 crc kubenswrapper[4861]: I0129 08:17:56.831881 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-9sql6" Jan 29 08:17:57 crc kubenswrapper[4861]: I0129 08:17:57.193429 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-7ed2-account-create-update-98kvj" Jan 29 08:17:57 crc kubenswrapper[4861]: I0129 08:17:57.264304 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-operator-scripts\") pod \"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2\" (UID: \"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2\") " Jan 29 08:17:57 crc kubenswrapper[4861]: I0129 08:17:57.264354 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pqvh\" (UniqueName: \"kubernetes.io/projected/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-kube-api-access-7pqvh\") pod \"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2\" (UID: \"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2\") " Jan 29 08:17:57 crc kubenswrapper[4861]: I0129 08:17:57.265259 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fcbbac74-0f1c-4d78-a3be-7a6c25933bd2" (UID: "fcbbac74-0f1c-4d78-a3be-7a6c25933bd2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:17:57 crc kubenswrapper[4861]: I0129 08:17:57.271981 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-kube-api-access-7pqvh" (OuterVolumeSpecName: "kube-api-access-7pqvh") pod "fcbbac74-0f1c-4d78-a3be-7a6c25933bd2" (UID: "fcbbac74-0f1c-4d78-a3be-7a6c25933bd2"). InnerVolumeSpecName "kube-api-access-7pqvh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:17:57 crc kubenswrapper[4861]: I0129 08:17:57.365909 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:57 crc kubenswrapper[4861]: I0129 08:17:57.365942 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pqvh\" (UniqueName: \"kubernetes.io/projected/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2-kube-api-access-7pqvh\") on node \"crc\" DevicePath \"\"" Jan 29 08:17:57 crc kubenswrapper[4861]: I0129 08:17:57.850745 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-7ed2-account-create-update-98kvj" event={"ID":"fcbbac74-0f1c-4d78-a3be-7a6c25933bd2","Type":"ContainerDied","Data":"817c386123924975783bf2401f9b2f19d791ac464eed7ae1e183d351c561d71b"} Jan 29 08:17:57 crc kubenswrapper[4861]: I0129 08:17:57.850809 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-7ed2-account-create-update-98kvj" Jan 29 08:17:57 crc kubenswrapper[4861]: I0129 08:17:57.850817 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="817c386123924975783bf2401f9b2f19d791ac464eed7ae1e183d351c561d71b" Jan 29 08:17:58 crc kubenswrapper[4861]: I0129 08:17:58.891170 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-lmzn7"] Jan 29 08:17:58 crc kubenswrapper[4861]: E0129 08:17:58.891800 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcbbac74-0f1c-4d78-a3be-7a6c25933bd2" containerName="mariadb-account-create-update" Jan 29 08:17:58 crc kubenswrapper[4861]: I0129 08:17:58.891814 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcbbac74-0f1c-4d78-a3be-7a6c25933bd2" containerName="mariadb-account-create-update" Jan 29 08:17:58 crc kubenswrapper[4861]: E0129 08:17:58.891834 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2825b4e3-4159-492d-87f0-90fc88b8c345" containerName="mariadb-database-create" Jan 29 08:17:58 crc kubenswrapper[4861]: I0129 08:17:58.891840 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="2825b4e3-4159-492d-87f0-90fc88b8c345" containerName="mariadb-database-create" Jan 29 08:17:58 crc kubenswrapper[4861]: I0129 08:17:58.898615 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcbbac74-0f1c-4d78-a3be-7a6c25933bd2" containerName="mariadb-account-create-update" Jan 29 08:17:58 crc kubenswrapper[4861]: I0129 08:17:58.898653 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="2825b4e3-4159-492d-87f0-90fc88b8c345" containerName="mariadb-database-create" Jan 29 08:17:58 crc kubenswrapper[4861]: I0129 08:17:58.903385 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-lmzn7"] Jan 29 08:17:58 crc kubenswrapper[4861]: I0129 08:17:58.903482 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-lmzn7" Jan 29 08:17:58 crc kubenswrapper[4861]: I0129 08:17:58.907459 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 29 08:17:58 crc kubenswrapper[4861]: I0129 08:17:58.907644 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-vxxnk" Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.000518 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-config-data\") pod \"heat-db-sync-lmzn7\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " pod="openstack/heat-db-sync-lmzn7" Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.000623 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql2cw\" (UniqueName: \"kubernetes.io/projected/40129c18-e920-4ead-a7d2-27d1c2c442ab-kube-api-access-ql2cw\") pod \"heat-db-sync-lmzn7\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " pod="openstack/heat-db-sync-lmzn7" Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.000744 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-combined-ca-bundle\") pod \"heat-db-sync-lmzn7\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " pod="openstack/heat-db-sync-lmzn7" Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.102475 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-config-data\") pod \"heat-db-sync-lmzn7\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " pod="openstack/heat-db-sync-lmzn7" Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.102570 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql2cw\" (UniqueName: \"kubernetes.io/projected/40129c18-e920-4ead-a7d2-27d1c2c442ab-kube-api-access-ql2cw\") pod \"heat-db-sync-lmzn7\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " pod="openstack/heat-db-sync-lmzn7" Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.102641 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-combined-ca-bundle\") pod \"heat-db-sync-lmzn7\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " pod="openstack/heat-db-sync-lmzn7" Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.108881 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-config-data\") pod \"heat-db-sync-lmzn7\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " pod="openstack/heat-db-sync-lmzn7" Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.121846 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-combined-ca-bundle\") pod \"heat-db-sync-lmzn7\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " pod="openstack/heat-db-sync-lmzn7" Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.143119 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-ql2cw\" (UniqueName: \"kubernetes.io/projected/40129c18-e920-4ead-a7d2-27d1c2c442ab-kube-api-access-ql2cw\") pod \"heat-db-sync-lmzn7\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " pod="openstack/heat-db-sync-lmzn7" Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.230234 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-lmzn7" Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.706779 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-lmzn7"] Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.712492 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 08:17:59 crc kubenswrapper[4861]: I0129 08:17:59.899131 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-lmzn7" event={"ID":"40129c18-e920-4ead-a7d2-27d1c2c442ab","Type":"ContainerStarted","Data":"dded10f997b3cb9ae1b30066e4595003fda99db0b049a9bbd7cf4d7484bce0a2"} Jan 29 08:18:00 crc kubenswrapper[4861]: I0129 08:18:00.629913 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:18:00 crc kubenswrapper[4861]: I0129 08:18:00.629973 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:18:02 crc kubenswrapper[4861]: I0129 08:18:02.711100 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-65994449bb-mb4xq" Jan 29 08:18:02 crc kubenswrapper[4861]: I0129 08:18:02.711499 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-65994449bb-mb4xq" Jan 29 08:18:05 crc kubenswrapper[4861]: I0129 08:18:05.992194 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-lmzn7" event={"ID":"40129c18-e920-4ead-a7d2-27d1c2c442ab","Type":"ContainerStarted","Data":"741f43c77976cd1aeda1d3c9c60155410d344c4260908316596da8caf7e66462"} Jan 29 08:18:06 crc kubenswrapper[4861]: I0129 08:18:06.016270 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-lmzn7" podStartSLOduration=2.092249952 podStartE2EDuration="8.016251866s" podCreationTimestamp="2026-01-29 08:17:58 +0000 UTC" firstStartedPulling="2026-01-29 08:17:59.712254068 +0000 UTC m=+6171.383748625" lastFinishedPulling="2026-01-29 08:18:05.636255952 +0000 UTC m=+6177.307750539" observedRunningTime="2026-01-29 08:18:06.013198895 +0000 UTC m=+6177.684693452" watchObservedRunningTime="2026-01-29 08:18:06.016251866 +0000 UTC m=+6177.687746423" Jan 29 08:18:09 crc kubenswrapper[4861]: I0129 08:18:09.030023 4861 generic.go:334] "Generic (PLEG): container finished" podID="40129c18-e920-4ead-a7d2-27d1c2c442ab" containerID="741f43c77976cd1aeda1d3c9c60155410d344c4260908316596da8caf7e66462" exitCode=0 Jan 29 08:18:09 crc kubenswrapper[4861]: I0129 08:18:09.030170 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-lmzn7" 
event={"ID":"40129c18-e920-4ead-a7d2-27d1c2c442ab","Type":"ContainerDied","Data":"741f43c77976cd1aeda1d3c9c60155410d344c4260908316596da8caf7e66462"} Jan 29 08:18:10 crc kubenswrapper[4861]: I0129 08:18:10.527522 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-lmzn7" Jan 29 08:18:10 crc kubenswrapper[4861]: I0129 08:18:10.597538 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ql2cw\" (UniqueName: \"kubernetes.io/projected/40129c18-e920-4ead-a7d2-27d1c2c442ab-kube-api-access-ql2cw\") pod \"40129c18-e920-4ead-a7d2-27d1c2c442ab\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " Jan 29 08:18:10 crc kubenswrapper[4861]: I0129 08:18:10.597630 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-config-data\") pod \"40129c18-e920-4ead-a7d2-27d1c2c442ab\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " Jan 29 08:18:10 crc kubenswrapper[4861]: I0129 08:18:10.597730 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-combined-ca-bundle\") pod \"40129c18-e920-4ead-a7d2-27d1c2c442ab\" (UID: \"40129c18-e920-4ead-a7d2-27d1c2c442ab\") " Jan 29 08:18:10 crc kubenswrapper[4861]: I0129 08:18:10.604982 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40129c18-e920-4ead-a7d2-27d1c2c442ab-kube-api-access-ql2cw" (OuterVolumeSpecName: "kube-api-access-ql2cw") pod "40129c18-e920-4ead-a7d2-27d1c2c442ab" (UID: "40129c18-e920-4ead-a7d2-27d1c2c442ab"). InnerVolumeSpecName "kube-api-access-ql2cw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:18:10 crc kubenswrapper[4861]: I0129 08:18:10.641580 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "40129c18-e920-4ead-a7d2-27d1c2c442ab" (UID: "40129c18-e920-4ead-a7d2-27d1c2c442ab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:10 crc kubenswrapper[4861]: I0129 08:18:10.684496 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-config-data" (OuterVolumeSpecName: "config-data") pod "40129c18-e920-4ead-a7d2-27d1c2c442ab" (UID: "40129c18-e920-4ead-a7d2-27d1c2c442ab"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:10 crc kubenswrapper[4861]: I0129 08:18:10.701583 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ql2cw\" (UniqueName: \"kubernetes.io/projected/40129c18-e920-4ead-a7d2-27d1c2c442ab-kube-api-access-ql2cw\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:10 crc kubenswrapper[4861]: I0129 08:18:10.701740 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:10 crc kubenswrapper[4861]: I0129 08:18:10.701751 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40129c18-e920-4ead-a7d2-27d1c2c442ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:11 crc kubenswrapper[4861]: I0129 08:18:11.056891 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-lmzn7" event={"ID":"40129c18-e920-4ead-a7d2-27d1c2c442ab","Type":"ContainerDied","Data":"dded10f997b3cb9ae1b30066e4595003fda99db0b049a9bbd7cf4d7484bce0a2"} Jan 29 08:18:11 crc kubenswrapper[4861]: I0129 08:18:11.056930 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dded10f997b3cb9ae1b30066e4595003fda99db0b049a9bbd7cf4d7484bce0a2" Jan 29 08:18:11 crc kubenswrapper[4861]: I0129 08:18:11.056990 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-lmzn7" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.268784 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-7dcf9f6d95-mhgt2"] Jan 29 08:18:12 crc kubenswrapper[4861]: E0129 08:18:12.269675 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40129c18-e920-4ead-a7d2-27d1c2c442ab" containerName="heat-db-sync" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.269694 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="40129c18-e920-4ead-a7d2-27d1c2c442ab" containerName="heat-db-sync" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.269953 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="40129c18-e920-4ead-a7d2-27d1c2c442ab" containerName="heat-db-sync" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.270931 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.273781 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.274118 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-vxxnk" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.275590 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.304660 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7dcf9f6d95-mhgt2"] Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.339816 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data-custom\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.339939 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-combined-ca-bundle\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.339967 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.340024 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvbss\" (UniqueName: \"kubernetes.io/projected/11f09089-193b-44e3-81c6-fd841caf0812-kube-api-access-tvbss\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.433402 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-dd5c95ff-75gtv"] Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.434723 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-dd5c95ff-75gtv" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.438917 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.442782 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data-custom\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.442894 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-combined-ca-bundle\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.442941 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.442999 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvbss\" (UniqueName: \"kubernetes.io/projected/11f09089-193b-44e3-81c6-fd841caf0812-kube-api-access-tvbss\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.449120 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data-custom\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.452499 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.507850 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-combined-ca-bundle\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.512894 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvbss\" (UniqueName: \"kubernetes.io/projected/11f09089-193b-44e3-81c6-fd841caf0812-kube-api-access-tvbss\") pod \"heat-engine-7dcf9f6d95-mhgt2\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.513141 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-dd5c95ff-75gtv"] Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.544285 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.544376 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-combined-ca-bundle\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.544420 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mktzn\" (UniqueName: \"kubernetes.io/projected/0f8e1690-060a-4f86-90f5-52f9f094a8ae-kube-api-access-mktzn\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.544450 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data-custom\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.551666 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7c47f9fdb7-wmb6g"]
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.553094 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.555915 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.563966 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7c47f9fdb7-wmb6g"]
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.601033 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-7dcf9f6d95-mhgt2"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.646044 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data-custom\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.646192 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.646236 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7np85\" (UniqueName: \"kubernetes.io/projected/0c536edb-e99b-463f-918e-ef03f76c504c-kube-api-access-7np85\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.646270 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-combined-ca-bundle\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.646297 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.646325 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mktzn\" (UniqueName: \"kubernetes.io/projected/0f8e1690-060a-4f86-90f5-52f9f094a8ae-kube-api-access-mktzn\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.646373 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-combined-ca-bundle\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.646442 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data-custom\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.649634 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-combined-ca-bundle\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.651190 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.651296 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data-custom\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.662754 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mktzn\" (UniqueName: \"kubernetes.io/projected/0f8e1690-060a-4f86-90f5-52f9f094a8ae-kube-api-access-mktzn\") pod \"heat-api-dd5c95ff-75gtv\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.748490 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data-custom\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.748557 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7np85\" (UniqueName: \"kubernetes.io/projected/0c536edb-e99b-463f-918e-ef03f76c504c-kube-api-access-7np85\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.748593 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.748623 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-combined-ca-bundle\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.761727 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data-custom\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.762832 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-combined-ca-bundle\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.774472 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.779871 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7np85\" (UniqueName: \"kubernetes.io/projected/0c536edb-e99b-463f-918e-ef03f76c504c-kube-api-access-7np85\") pod \"heat-cfnapi-7c47f9fdb7-wmb6g\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.903620 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:12 crc kubenswrapper[4861]: I0129 08:18:12.912643 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:13 crc kubenswrapper[4861]: I0129 08:18:13.130679 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7dcf9f6d95-mhgt2"]
Jan 29 08:18:13 crc kubenswrapper[4861]: W0129 08:18:13.134676 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod11f09089_193b_44e3_81c6_fd841caf0812.slice/crio-118003bf458a30060cfe82e6aed5097306ed751272ff459d631260d924988b06 WatchSource:0}: Error finding container 118003bf458a30060cfe82e6aed5097306ed751272ff459d631260d924988b06: Status 404 returned error can't find the container with id 118003bf458a30060cfe82e6aed5097306ed751272ff459d631260d924988b06
Jan 29 08:18:13 crc kubenswrapper[4861]: W0129 08:18:13.457308 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c536edb_e99b_463f_918e_ef03f76c504c.slice/crio-d0a6e86d8591753a71d8d03a6d08fb64fc159e28ec2d918ff8412a8dbd5d3934 WatchSource:0}: Error finding container d0a6e86d8591753a71d8d03a6d08fb64fc159e28ec2d918ff8412a8dbd5d3934: Status 404 returned error can't find the container with id d0a6e86d8591753a71d8d03a6d08fb64fc159e28ec2d918ff8412a8dbd5d3934
Jan 29 08:18:13 crc kubenswrapper[4861]: I0129 08:18:13.458785 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7c47f9fdb7-wmb6g"]
Jan 29 08:18:13 crc kubenswrapper[4861]: I0129 08:18:13.473977 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-dd5c95ff-75gtv"]
Jan 29 08:18:13 crc kubenswrapper[4861]: W0129 08:18:13.479853 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f8e1690_060a_4f86_90f5_52f9f094a8ae.slice/crio-df9db8f17dd94c056f3885f7bfc6726f69c6820e60eaedf57225e22ec93d1a70 WatchSource:0}: Error finding container df9db8f17dd94c056f3885f7bfc6726f69c6820e60eaedf57225e22ec93d1a70: Status 404 returned error can't find the container with id df9db8f17dd94c056f3885f7bfc6726f69c6820e60eaedf57225e22ec93d1a70
Jan 29 08:18:14 crc kubenswrapper[4861]: I0129 08:18:14.085379 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g" event={"ID":"0c536edb-e99b-463f-918e-ef03f76c504c","Type":"ContainerStarted","Data":"d0a6e86d8591753a71d8d03a6d08fb64fc159e28ec2d918ff8412a8dbd5d3934"}
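The W0129 manager.go:1169 warnings here and earlier in this log appear to be a benign startup race: the cgroup watcher sees the new crio-* cgroup before the runtime has finished registering the container, so the lookup returns 404 and the single event is dropped; the ContainerStarted events that follow show the pods come up anyway. A sketch of tolerating that race with a hypothetical lookup (not the cAdvisor code itself):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("Status 404 returned error can't find the container")

// lookupContainer is a stand-in for resolving a container named by a
// cgroup watch event against what the runtime currently knows.
func lookupContainer(id string, known map[string]bool) error {
	if !known[id] {
		return fmt.Errorf("finding container %s: %w", id, errNotFound)
	}
	return nil
}

// processWatchEvent logs and drops events for containers the runtime does
// not know yet rather than failing the watch loop, matching the
// "Failed to process watch event ... Status 404" warnings above.
func processWatchEvent(id string, known map[string]bool) {
	if err := lookupContainer(id, known); err != nil {
		fmt.Printf("W Failed to process watch event: %v\n", err)
		return // tolerated: a later relist picks the container up
	}
	fmt.Printf("I watch event handled for %s\n", id)
}

func main() {
	processWatchEvent("118003bf458a30060cfe82e6aed5097306ed751272ff459d631260d924988b06", nil)
}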
Jan 29 08:18:14 crc kubenswrapper[4861]: I0129 08:18:14.086749 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-dd5c95ff-75gtv" event={"ID":"0f8e1690-060a-4f86-90f5-52f9f094a8ae","Type":"ContainerStarted","Data":"df9db8f17dd94c056f3885f7bfc6726f69c6820e60eaedf57225e22ec93d1a70"}
Jan 29 08:18:14 crc kubenswrapper[4861]: I0129 08:18:14.089971 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7dcf9f6d95-mhgt2" event={"ID":"11f09089-193b-44e3-81c6-fd841caf0812","Type":"ContainerStarted","Data":"4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7"}
Jan 29 08:18:14 crc kubenswrapper[4861]: I0129 08:18:14.090018 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7dcf9f6d95-mhgt2" event={"ID":"11f09089-193b-44e3-81c6-fd841caf0812","Type":"ContainerStarted","Data":"118003bf458a30060cfe82e6aed5097306ed751272ff459d631260d924988b06"}
Jan 29 08:18:14 crc kubenswrapper[4861]: I0129 08:18:14.090233 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-7dcf9f6d95-mhgt2"
Jan 29 08:18:14 crc kubenswrapper[4861]: I0129 08:18:14.109939 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-7dcf9f6d95-mhgt2" podStartSLOduration=2.10992192 podStartE2EDuration="2.10992192s" podCreationTimestamp="2026-01-29 08:18:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:18:14.103143642 +0000 UTC m=+6185.774638209" watchObservedRunningTime="2026-01-29 08:18:14.10992192 +0000 UTC m=+6185.781416477"
Jan 29 08:18:14 crc kubenswrapper[4861]: I0129 08:18:14.702538 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:18:16 crc kubenswrapper[4861]: I0129 08:18:16.108632 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g" event={"ID":"0c536edb-e99b-463f-918e-ef03f76c504c","Type":"ContainerStarted","Data":"9e9c9c7d78e43f7f5db367e2b3baac84b6b2a0898eb09fde35a37783a7052645"}
Jan 29 08:18:16 crc kubenswrapper[4861]: I0129 08:18:16.109426 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g"
Jan 29 08:18:16 crc kubenswrapper[4861]: I0129 08:18:16.111006 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-dd5c95ff-75gtv" event={"ID":"0f8e1690-060a-4f86-90f5-52f9f094a8ae","Type":"ContainerStarted","Data":"bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019"}
Jan 29 08:18:16 crc kubenswrapper[4861]: I0129 08:18:16.111244 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-dd5c95ff-75gtv"
Jan 29 08:18:16 crc kubenswrapper[4861]: I0129 08:18:16.128832 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g" podStartSLOduration=1.843495093 podStartE2EDuration="4.128815664s" podCreationTimestamp="2026-01-29 08:18:12 +0000 UTC" firstStartedPulling="2026-01-29 08:18:13.459394582 +0000 UTC m=+6185.130889139" lastFinishedPulling="2026-01-29 08:18:15.744715153 +0000 UTC m=+6187.416209710" observedRunningTime="2026-01-29 08:18:16.123009592 +0000 UTC m=+6187.794504149" watchObservedRunningTime="2026-01-29 08:18:16.128815664 +0000 UTC m=+6187.800310221"
Jan 29 08:18:16 crc kubenswrapper[4861]: I0129 08:18:16.152408 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-dd5c95ff-75gtv" podStartSLOduration=1.887737616 podStartE2EDuration="4.152388464s" podCreationTimestamp="2026-01-29 08:18:12 +0000 UTC" firstStartedPulling="2026-01-29 08:18:13.48253029 +0000 UTC m=+6185.154024847" lastFinishedPulling="2026-01-29 08:18:15.747181138 +0000 UTC m=+6187.418675695" observedRunningTime="2026-01-29 08:18:16.144017214 +0000 UTC m=+6187.815511781" watchObservedRunningTime="2026-01-29 08:18:16.152388464 +0000 UTC m=+6187.823883021"
Jan 29 08:18:16 crc kubenswrapper[4861]: I0129 08:18:16.792315 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-65994449bb-mb4xq"
Jan 29 08:18:16 crc kubenswrapper[4861]: I0129 08:18:16.880814 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6f4b54cb6-vdr9c"]
Jan 29 08:18:16 crc kubenswrapper[4861]: I0129 08:18:16.881156 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6f4b54cb6-vdr9c" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon" containerID="cri-o://28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec" gracePeriod=30
Jan 29 08:18:16 crc kubenswrapper[4861]: I0129 08:18:16.882286 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6f4b54cb6-vdr9c" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon-log" containerID="cri-o://eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027" gracePeriod=30
Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.771876 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-bf66d5877-52r4g"]
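The sequence above is a clean rolling replacement: the new horizon pod turns ready, and only then does a SyncLoop DELETE arrive for the old one, producing one "Killing container with a grace period" line per container. The grace period (30s for horizon here, 60s for the heat services further down) is the window the container gets between the stop signal and a forced kill. A sketch of that contract (illustrative only, not kubelet's kill path):

package main

import (
	"context"
	"fmt"
	"time"
)

// killContainer requests a stop and waits at most gracePeriod for the
// container to exit before escalating, mirroring the
// "Killing container with a grace period" entries.
func killContainer(name string, gracePeriod time.Duration, exited <-chan struct{}) {
	fmt.Printf("Killing container %q with a grace period gracePeriod=%v\n", name, gracePeriod)
	ctx, cancel := context.WithTimeout(context.Background(), gracePeriod)
	defer cancel()
	// In the real flow, the CRI StopContainer call delivers SIGTERM here.
	select {
	case <-exited:
		fmt.Printf("container %q exited within the grace period\n", name)
	case <-ctx.Done():
		fmt.Printf("grace period elapsed; force-killing %q\n", name)
	}
}

func main() {
	done := make(chan struct{})
	close(done) // pretend the container exits immediately
	killContainer("horizon", 30*time.Second, done)
}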
Need to start a new one" pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.808163 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-bf66d5877-52r4g"] Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.810590 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c283e31a-5c70-4767-9a37-28a0778db9d3-config-data\") pod \"heat-engine-bf66d5877-52r4g\" (UID: \"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.810650 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c283e31a-5c70-4767-9a37-28a0778db9d3-config-data-custom\") pod \"heat-engine-bf66d5877-52r4g\" (UID: \"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.810698 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdbvx\" (UniqueName: \"kubernetes.io/projected/c283e31a-5c70-4767-9a37-28a0778db9d3-kube-api-access-rdbvx\") pod \"heat-engine-bf66d5877-52r4g\" (UID: \"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.810998 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c283e31a-5c70-4767-9a37-28a0778db9d3-combined-ca-bundle\") pod \"heat-engine-bf66d5877-52r4g\" (UID: \"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.821207 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-684c6cff65-gz65z"] Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.822555 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.837686 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-64cfbc9cd6-qrq94"] Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.838877 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.851171 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-684c6cff65-gz65z"] Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.860182 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-64cfbc9cd6-qrq94"] Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913321 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-combined-ca-bundle\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913390 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-combined-ca-bundle\") pod \"heat-api-684c6cff65-gz65z\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") " pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913494 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-config-data\") pod \"heat-api-684c6cff65-gz65z\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") " pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913553 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913577 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c283e31a-5c70-4767-9a37-28a0778db9d3-combined-ca-bundle\") pod \"heat-engine-bf66d5877-52r4g\" (UID: \"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913604 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data-custom\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913653 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tk78p\" (UniqueName: \"kubernetes.io/projected/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-kube-api-access-tk78p\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913702 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c283e31a-5c70-4767-9a37-28a0778db9d3-config-data\") pod \"heat-engine-bf66d5877-52r4g\" (UID: 
\"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913765 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c283e31a-5c70-4767-9a37-28a0778db9d3-config-data-custom\") pod \"heat-engine-bf66d5877-52r4g\" (UID: \"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913835 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6g95p\" (UniqueName: \"kubernetes.io/projected/ea3eb692-8484-4511-9f14-e3abb700817b-kube-api-access-6g95p\") pod \"heat-api-684c6cff65-gz65z\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") " pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913881 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdbvx\" (UniqueName: \"kubernetes.io/projected/c283e31a-5c70-4767-9a37-28a0778db9d3-kube-api-access-rdbvx\") pod \"heat-engine-bf66d5877-52r4g\" (UID: \"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.913913 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-config-data-custom\") pod \"heat-api-684c6cff65-gz65z\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") " pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.921475 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c283e31a-5c70-4767-9a37-28a0778db9d3-config-data\") pod \"heat-engine-bf66d5877-52r4g\" (UID: \"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.922143 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c283e31a-5c70-4767-9a37-28a0778db9d3-combined-ca-bundle\") pod \"heat-engine-bf66d5877-52r4g\" (UID: \"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.928459 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c283e31a-5c70-4767-9a37-28a0778db9d3-config-data-custom\") pod \"heat-engine-bf66d5877-52r4g\" (UID: \"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:19 crc kubenswrapper[4861]: I0129 08:18:19.928656 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdbvx\" (UniqueName: \"kubernetes.io/projected/c283e31a-5c70-4767-9a37-28a0778db9d3-kube-api-access-rdbvx\") pod \"heat-engine-bf66d5877-52r4g\" (UID: \"c283e31a-5c70-4767-9a37-28a0778db9d3\") " pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.014820 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6g95p\" (UniqueName: \"kubernetes.io/projected/ea3eb692-8484-4511-9f14-e3abb700817b-kube-api-access-6g95p\") pod \"heat-api-684c6cff65-gz65z\" (UID: 
\"ea3eb692-8484-4511-9f14-e3abb700817b\") " pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.014871 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-config-data-custom\") pod \"heat-api-684c6cff65-gz65z\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") " pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.014905 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-combined-ca-bundle\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.014923 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-combined-ca-bundle\") pod \"heat-api-684c6cff65-gz65z\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") " pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.014963 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-config-data\") pod \"heat-api-684c6cff65-gz65z\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") " pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.014993 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.015018 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data-custom\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.015051 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tk78p\" (UniqueName: \"kubernetes.io/projected/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-kube-api-access-tk78p\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.020024 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-config-data-custom\") pod \"heat-api-684c6cff65-gz65z\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") " pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.020325 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-combined-ca-bundle\") pod \"heat-api-684c6cff65-gz65z\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") " 
pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.021119 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-config-data\") pod \"heat-api-684c6cff65-gz65z\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") " pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.021266 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data-custom\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.022445 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6f4b54cb6-vdr9c" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.126:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:42730->10.217.1.126:8443: read: connection reset by peer" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.025012 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.032788 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-combined-ca-bundle\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.035831 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tk78p\" (UniqueName: \"kubernetes.io/projected/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-kube-api-access-tk78p\") pod \"heat-cfnapi-64cfbc9cd6-qrq94\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") " pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.036389 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6g95p\" (UniqueName: \"kubernetes.io/projected/ea3eb692-8484-4511-9f14-e3abb700817b-kube-api-access-6g95p\") pod \"heat-api-684c6cff65-gz65z\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") " pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.093721 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.145674 4861 generic.go:334] "Generic (PLEG): container finished" podID="c569ed14-e911-4ead-ada8-270f32a1297f" containerID="28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec" exitCode=0 Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.145893 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f4b54cb6-vdr9c" event={"ID":"c569ed14-e911-4ead-ada8-270f32a1297f","Type":"ContainerDied","Data":"28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec"} Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.162545 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.170368 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.704897 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-64cfbc9cd6-qrq94"] Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.713549 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-684c6cff65-gz65z"] Jan 29 08:18:20 crc kubenswrapper[4861]: W0129 08:18:20.716178 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc283e31a_5c70_4767_9a37_28a0778db9d3.slice/crio-2b4df5ec923857715d9c5df28e9dc54777d3fdbc52dbec833781ab7b1ae36806 WatchSource:0}: Error finding container 2b4df5ec923857715d9c5df28e9dc54777d3fdbc52dbec833781ab7b1ae36806: Status 404 returned error can't find the container with id 2b4df5ec923857715d9c5df28e9dc54777d3fdbc52dbec833781ab7b1ae36806 Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.726377 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-bf66d5877-52r4g"] Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.877791 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-dd5c95ff-75gtv"] Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.878301 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-dd5c95ff-75gtv" podUID="0f8e1690-060a-4f86-90f5-52f9f094a8ae" containerName="heat-api" containerID="cri-o://bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019" gracePeriod=60 Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.897772 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7c47f9fdb7-wmb6g"] Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.897967 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g" podUID="0c536edb-e99b-463f-918e-ef03f76c504c" containerName="heat-cfnapi" containerID="cri-o://9e9c9c7d78e43f7f5db367e2b3baac84b6b2a0898eb09fde35a37783a7052645" gracePeriod=60 Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.911256 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-596f6c779b-zwj48"] Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.912580 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.918376 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.918642 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.953026 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-668fc5bbd6-8whvv"] Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.954309 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.961956 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.962113 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.966365 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-internal-tls-certs\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.966415 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-config-data\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.966627 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blx4w\" (UniqueName: \"kubernetes.io/projected/09f79ef4-29fe-49af-bc34-175306e9211a-kube-api-access-blx4w\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.967680 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-public-tls-certs\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.967732 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-combined-ca-bundle\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:20 crc kubenswrapper[4861]: I0129 08:18:20.967802 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-config-data-custom\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc 
kubenswrapper[4861]: I0129 08:18:21.009033 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-596f6c779b-zwj48"] Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.022879 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-668fc5bbd6-8whvv"] Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.069655 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw8vx\" (UniqueName: \"kubernetes.io/projected/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-kube-api-access-tw8vx\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.069724 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-public-tls-certs\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.069752 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-combined-ca-bundle\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.069772 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-public-tls-certs\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.069806 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-config-data-custom\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.069830 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-internal-tls-certs\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.069849 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-config-data\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.069867 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-config-data\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.069899 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-combined-ca-bundle\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.069921 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-internal-tls-certs\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.069970 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blx4w\" (UniqueName: \"kubernetes.io/projected/09f79ef4-29fe-49af-bc34-175306e9211a-kube-api-access-blx4w\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.070019 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-config-data-custom\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.075865 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-internal-tls-certs\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.075942 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-config-data-custom\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.076551 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-combined-ca-bundle\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.076996 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-public-tls-certs\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.077841 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09f79ef4-29fe-49af-bc34-175306e9211a-config-data\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.088340 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-blx4w\" (UniqueName: \"kubernetes.io/projected/09f79ef4-29fe-49af-bc34-175306e9211a-kube-api-access-blx4w\") pod \"heat-api-596f6c779b-zwj48\" (UID: \"09f79ef4-29fe-49af-bc34-175306e9211a\") " pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.175116 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-public-tls-certs\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.175295 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-config-data\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.175369 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-combined-ca-bundle\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.175426 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-internal-tls-certs\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.175648 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-config-data-custom\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.175708 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw8vx\" (UniqueName: \"kubernetes.io/projected/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-kube-api-access-tw8vx\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.185323 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-config-data\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.191284 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-combined-ca-bundle\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.191739 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-config-data-custom\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.200492 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-internal-tls-certs\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.209255 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g" event={"ID":"0c536edb-e99b-463f-918e-ef03f76c504c","Type":"ContainerDied","Data":"9e9c9c7d78e43f7f5db367e2b3baac84b6b2a0898eb09fde35a37783a7052645"} Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.209260 4861 generic.go:334] "Generic (PLEG): container finished" podID="0c536edb-e99b-463f-918e-ef03f76c504c" containerID="9e9c9c7d78e43f7f5db367e2b3baac84b6b2a0898eb09fde35a37783a7052645" exitCode=0 Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.211104 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw8vx\" (UniqueName: \"kubernetes.io/projected/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-kube-api-access-tw8vx\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.211106 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a45c4bb9-8e82-45ac-b264-72f2325e8c3f-public-tls-certs\") pod \"heat-cfnapi-668fc5bbd6-8whvv\" (UID: \"a45c4bb9-8e82-45ac-b264-72f2325e8c3f\") " pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.214554 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-bf66d5877-52r4g" event={"ID":"c283e31a-5c70-4767-9a37-28a0778db9d3","Type":"ContainerStarted","Data":"2f6b93129c881aac952a6918eed83469470162246aa90eec2d1865e37b699d88"} Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.214598 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-bf66d5877-52r4g" event={"ID":"c283e31a-5c70-4767-9a37-28a0778db9d3","Type":"ContainerStarted","Data":"2b4df5ec923857715d9c5df28e9dc54777d3fdbc52dbec833781ab7b1ae36806"} Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.214639 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.224721 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-684c6cff65-gz65z" event={"ID":"ea3eb692-8484-4511-9f14-e3abb700817b","Type":"ContainerStarted","Data":"932095fd3d52ad4fac407851c26178d3bae47ef6514a418e2af5b0eac0b89cf9"} Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.224773 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-684c6cff65-gz65z" event={"ID":"ea3eb692-8484-4511-9f14-e3abb700817b","Type":"ContainerStarted","Data":"d0ce63df49234ba0cde951489a0bd1f1efa8b160fb2d4b7af366173514d26ff9"} Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.224884 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 
08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.259208 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" event={"ID":"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2","Type":"ContainerStarted","Data":"ceea5f4eff54e916887b427befc9f8f529ece232f45f8abaeaaf0bb8727409ce"} Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.259539 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" event={"ID":"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2","Type":"ContainerStarted","Data":"e0c79d53053c4784d072ceca2779396ba6b24fb98d6fadf438cf73d7c810ab3f"} Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.260382 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.275607 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-bf66d5877-52r4g" podStartSLOduration=2.275588249 podStartE2EDuration="2.275588249s" podCreationTimestamp="2026-01-29 08:18:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:18:21.252586434 +0000 UTC m=+6192.924081001" watchObservedRunningTime="2026-01-29 08:18:21.275588249 +0000 UTC m=+6192.947082806" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.294216 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-684c6cff65-gz65z" podStartSLOduration=2.294184388 podStartE2EDuration="2.294184388s" podCreationTimestamp="2026-01-29 08:18:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:18:21.272556729 +0000 UTC m=+6192.944051276" watchObservedRunningTime="2026-01-29 08:18:21.294184388 +0000 UTC m=+6192.965678945" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.297529 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" podStartSLOduration=2.297515546 podStartE2EDuration="2.297515546s" podCreationTimestamp="2026-01-29 08:18:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:18:21.288783396 +0000 UTC m=+6192.960277973" watchObservedRunningTime="2026-01-29 08:18:21.297515546 +0000 UTC m=+6192.969010103" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.336237 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.357648 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.486168 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.594608 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data\") pod \"0c536edb-e99b-463f-918e-ef03f76c504c\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.595016 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data-custom\") pod \"0c536edb-e99b-463f-918e-ef03f76c504c\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.595176 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-combined-ca-bundle\") pod \"0c536edb-e99b-463f-918e-ef03f76c504c\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.597416 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7np85\" (UniqueName: \"kubernetes.io/projected/0c536edb-e99b-463f-918e-ef03f76c504c-kube-api-access-7np85\") pod \"0c536edb-e99b-463f-918e-ef03f76c504c\" (UID: \"0c536edb-e99b-463f-918e-ef03f76c504c\") " Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.600852 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0c536edb-e99b-463f-918e-ef03f76c504c" (UID: "0c536edb-e99b-463f-918e-ef03f76c504c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.603096 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c536edb-e99b-463f-918e-ef03f76c504c-kube-api-access-7np85" (OuterVolumeSpecName: "kube-api-access-7np85") pod "0c536edb-e99b-463f-918e-ef03f76c504c" (UID: "0c536edb-e99b-463f-918e-ef03f76c504c"). InnerVolumeSpecName "kube-api-access-7np85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.636205 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c536edb-e99b-463f-918e-ef03f76c504c" (UID: "0c536edb-e99b-463f-918e-ef03f76c504c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.657367 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data" (OuterVolumeSpecName: "config-data") pod "0c536edb-e99b-463f-918e-ef03f76c504c" (UID: "0c536edb-e99b-463f-918e-ef03f76c504c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.699923 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7np85\" (UniqueName: \"kubernetes.io/projected/0c536edb-e99b-463f-918e-ef03f76c504c-kube-api-access-7np85\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.699961 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.699973 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.699981 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c536edb-e99b-463f-918e-ef03f76c504c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.718786 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-dd5c95ff-75gtv" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.800903 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data\") pod \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.800981 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mktzn\" (UniqueName: \"kubernetes.io/projected/0f8e1690-060a-4f86-90f5-52f9f094a8ae-kube-api-access-mktzn\") pod \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.801022 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data-custom\") pod \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.801180 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-combined-ca-bundle\") pod \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\" (UID: \"0f8e1690-060a-4f86-90f5-52f9f094a8ae\") " Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.804799 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f8e1690-060a-4f86-90f5-52f9f094a8ae-kube-api-access-mktzn" (OuterVolumeSpecName: "kube-api-access-mktzn") pod "0f8e1690-060a-4f86-90f5-52f9f094a8ae" (UID: "0f8e1690-060a-4f86-90f5-52f9f094a8ae"). InnerVolumeSpecName "kube-api-access-mktzn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.806827 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0f8e1690-060a-4f86-90f5-52f9f094a8ae" (UID: "0f8e1690-060a-4f86-90f5-52f9f094a8ae"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.830268 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0f8e1690-060a-4f86-90f5-52f9f094a8ae" (UID: "0f8e1690-060a-4f86-90f5-52f9f094a8ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.867311 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data" (OuterVolumeSpecName: "config-data") pod "0f8e1690-060a-4f86-90f5-52f9f094a8ae" (UID: "0f8e1690-060a-4f86-90f5-52f9f094a8ae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.905216 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.905262 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mktzn\" (UniqueName: \"kubernetes.io/projected/0f8e1690-060a-4f86-90f5-52f9f094a8ae-kube-api-access-mktzn\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.905275 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.905285 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f8e1690-060a-4f86-90f5-52f9f094a8ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.913708 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-596f6c779b-zwj48"] Jan 29 08:18:21 crc kubenswrapper[4861]: W0129 08:18:21.922340 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09f79ef4_29fe_49af_bc34_175306e9211a.slice/crio-3c7173fd8c4195287923746344ee9186a434f2e7e300b307dc68ad42b9eba489 WatchSource:0}: Error finding container 3c7173fd8c4195287923746344ee9186a434f2e7e300b307dc68ad42b9eba489: Status 404 returned error can't find the container with id 3c7173fd8c4195287923746344ee9186a434f2e7e300b307dc68ad42b9eba489 Jan 29 08:18:21 crc kubenswrapper[4861]: I0129 08:18:21.922374 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-668fc5bbd6-8whvv"] Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.270985 4861 generic.go:334] "Generic (PLEG): container finished" podID="ea3eb692-8484-4511-9f14-e3abb700817b" 
containerID="932095fd3d52ad4fac407851c26178d3bae47ef6514a418e2af5b0eac0b89cf9" exitCode=1 Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.271102 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-684c6cff65-gz65z" event={"ID":"ea3eb692-8484-4511-9f14-e3abb700817b","Type":"ContainerDied","Data":"932095fd3d52ad4fac407851c26178d3bae47ef6514a418e2af5b0eac0b89cf9"} Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.271630 4861 scope.go:117] "RemoveContainer" containerID="932095fd3d52ad4fac407851c26178d3bae47ef6514a418e2af5b0eac0b89cf9" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.274205 4861 generic.go:334] "Generic (PLEG): container finished" podID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" containerID="ceea5f4eff54e916887b427befc9f8f529ece232f45f8abaeaaf0bb8727409ce" exitCode=1 Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.274305 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" event={"ID":"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2","Type":"ContainerDied","Data":"ceea5f4eff54e916887b427befc9f8f529ece232f45f8abaeaaf0bb8727409ce"} Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.274934 4861 scope.go:117] "RemoveContainer" containerID="ceea5f4eff54e916887b427befc9f8f529ece232f45f8abaeaaf0bb8727409ce" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.277725 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" event={"ID":"a45c4bb9-8e82-45ac-b264-72f2325e8c3f","Type":"ContainerStarted","Data":"f439f9c7cd1da00afcd5febff136bc5b627c53e45b8c00c4412737c68f1b38a4"} Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.277784 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" event={"ID":"a45c4bb9-8e82-45ac-b264-72f2325e8c3f","Type":"ContainerStarted","Data":"34e64bab0550576b74ade78fea845a7f39f53c93c9ded447f4eb9563bd0d8787"} Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.277993 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.301278 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g" event={"ID":"0c536edb-e99b-463f-918e-ef03f76c504c","Type":"ContainerDied","Data":"d0a6e86d8591753a71d8d03a6d08fb64fc159e28ec2d918ff8412a8dbd5d3934"} Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.301321 4861 scope.go:117] "RemoveContainer" containerID="9e9c9c7d78e43f7f5db367e2b3baac84b6b2a0898eb09fde35a37783a7052645" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.301439 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7c47f9fdb7-wmb6g" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.316921 4861 generic.go:334] "Generic (PLEG): container finished" podID="0f8e1690-060a-4f86-90f5-52f9f094a8ae" containerID="bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019" exitCode=0 Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.317007 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-dd5c95ff-75gtv" event={"ID":"0f8e1690-060a-4f86-90f5-52f9f094a8ae","Type":"ContainerDied","Data":"bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019"} Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.317035 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-dd5c95ff-75gtv" event={"ID":"0f8e1690-060a-4f86-90f5-52f9f094a8ae","Type":"ContainerDied","Data":"df9db8f17dd94c056f3885f7bfc6726f69c6820e60eaedf57225e22ec93d1a70"} Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.317097 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-dd5c95ff-75gtv" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.319822 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-596f6c779b-zwj48" event={"ID":"09f79ef4-29fe-49af-bc34-175306e9211a","Type":"ContainerStarted","Data":"3c7173fd8c4195287923746344ee9186a434f2e7e300b307dc68ad42b9eba489"} Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.320167 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-596f6c779b-zwj48" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.323002 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-668fc5bbd6-8whvv" podStartSLOduration=2.322985474 podStartE2EDuration="2.322985474s" podCreationTimestamp="2026-01-29 08:18:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:18:22.322431269 +0000 UTC m=+6193.993925826" watchObservedRunningTime="2026-01-29 08:18:22.322985474 +0000 UTC m=+6193.994480031" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.339802 4861 scope.go:117] "RemoveContainer" containerID="bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.380080 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-596f6c779b-zwj48" podStartSLOduration=2.380052004 podStartE2EDuration="2.380052004s" podCreationTimestamp="2026-01-29 08:18:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:18:22.374383015 +0000 UTC m=+6194.045877592" watchObservedRunningTime="2026-01-29 08:18:22.380052004 +0000 UTC m=+6194.051546561" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.418322 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-dd5c95ff-75gtv"] Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.425331 4861 scope.go:117] "RemoveContainer" containerID="bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.427844 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-dd5c95ff-75gtv"] Jan 29 08:18:22 crc kubenswrapper[4861]: E0129 08:18:22.429201 4861 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019\": container with ID starting with bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019 not found: ID does not exist" containerID="bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.429238 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019"} err="failed to get container status \"bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019\": rpc error: code = NotFound desc = could not find container \"bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019\": container with ID starting with bbb3ec1063eaac72a691f6d24b149e31a15bae0d39c52d3b46031abdfbce3019 not found: ID does not exist" Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.436959 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7c47f9fdb7-wmb6g"] Jan 29 08:18:22 crc kubenswrapper[4861]: I0129 08:18:22.444535 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7c47f9fdb7-wmb6g"] Jan 29 08:18:23 crc kubenswrapper[4861]: I0129 08:18:23.149674 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c536edb-e99b-463f-918e-ef03f76c504c" path="/var/lib/kubelet/pods/0c536edb-e99b-463f-918e-ef03f76c504c/volumes" Jan 29 08:18:23 crc kubenswrapper[4861]: I0129 08:18:23.150595 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f8e1690-060a-4f86-90f5-52f9f094a8ae" path="/var/lib/kubelet/pods/0f8e1690-060a-4f86-90f5-52f9f094a8ae/volumes" Jan 29 08:18:23 crc kubenswrapper[4861]: I0129 08:18:23.330113 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-596f6c779b-zwj48" event={"ID":"09f79ef4-29fe-49af-bc34-175306e9211a","Type":"ContainerStarted","Data":"0fd9e1e79ee2b32c47d75c5de33bc987c0ac4669cd0e493adcb5b925d9371232"} Jan 29 08:18:23 crc kubenswrapper[4861]: I0129 08:18:23.332005 4861 generic.go:334] "Generic (PLEG): container finished" podID="ea3eb692-8484-4511-9f14-e3abb700817b" containerID="fc7cdc751d2d4dfbb2eb222607922529e658592c8aa36e7542c0146777881599" exitCode=1 Jan 29 08:18:23 crc kubenswrapper[4861]: I0129 08:18:23.332066 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-684c6cff65-gz65z" event={"ID":"ea3eb692-8484-4511-9f14-e3abb700817b","Type":"ContainerDied","Data":"fc7cdc751d2d4dfbb2eb222607922529e658592c8aa36e7542c0146777881599"} Jan 29 08:18:23 crc kubenswrapper[4861]: I0129 08:18:23.332233 4861 scope.go:117] "RemoveContainer" containerID="932095fd3d52ad4fac407851c26178d3bae47ef6514a418e2af5b0eac0b89cf9" Jan 29 08:18:23 crc kubenswrapper[4861]: I0129 08:18:23.332407 4861 scope.go:117] "RemoveContainer" containerID="fc7cdc751d2d4dfbb2eb222607922529e658592c8aa36e7542c0146777881599" Jan 29 08:18:23 crc kubenswrapper[4861]: E0129 08:18:23.332652 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-684c6cff65-gz65z_openstack(ea3eb692-8484-4511-9f14-e3abb700817b)\"" pod="openstack/heat-api-684c6cff65-gz65z" podUID="ea3eb692-8484-4511-9f14-e3abb700817b" Jan 29 08:18:23 crc kubenswrapper[4861]: I0129 08:18:23.337678 4861 generic.go:334] "Generic (PLEG): container finished" 
podID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" containerID="f45b3750c9bbbaaec2f91bb8088845633261412c9cd375ebd095771f21eb191b" exitCode=1 Jan 29 08:18:23 crc kubenswrapper[4861]: I0129 08:18:23.337729 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" event={"ID":"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2","Type":"ContainerDied","Data":"f45b3750c9bbbaaec2f91bb8088845633261412c9cd375ebd095771f21eb191b"} Jan 29 08:18:23 crc kubenswrapper[4861]: I0129 08:18:23.338411 4861 scope.go:117] "RemoveContainer" containerID="f45b3750c9bbbaaec2f91bb8088845633261412c9cd375ebd095771f21eb191b" Jan 29 08:18:23 crc kubenswrapper[4861]: E0129 08:18:23.338651 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-64cfbc9cd6-qrq94_openstack(1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2)\"" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" podUID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" Jan 29 08:18:23 crc kubenswrapper[4861]: I0129 08:18:23.406989 4861 scope.go:117] "RemoveContainer" containerID="ceea5f4eff54e916887b427befc9f8f529ece232f45f8abaeaaf0bb8727409ce" Jan 29 08:18:24 crc kubenswrapper[4861]: I0129 08:18:24.356460 4861 scope.go:117] "RemoveContainer" containerID="fc7cdc751d2d4dfbb2eb222607922529e658592c8aa36e7542c0146777881599" Jan 29 08:18:24 crc kubenswrapper[4861]: E0129 08:18:24.356865 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-684c6cff65-gz65z_openstack(ea3eb692-8484-4511-9f14-e3abb700817b)\"" pod="openstack/heat-api-684c6cff65-gz65z" podUID="ea3eb692-8484-4511-9f14-e3abb700817b" Jan 29 08:18:24 crc kubenswrapper[4861]: I0129 08:18:24.364177 4861 scope.go:117] "RemoveContainer" containerID="f45b3750c9bbbaaec2f91bb8088845633261412c9cd375ebd095771f21eb191b" Jan 29 08:18:24 crc kubenswrapper[4861]: E0129 08:18:24.365135 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-64cfbc9cd6-qrq94_openstack(1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2)\"" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" podUID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" Jan 29 08:18:25 crc kubenswrapper[4861]: I0129 08:18:25.162877 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:25 crc kubenswrapper[4861]: I0129 08:18:25.162958 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:25 crc kubenswrapper[4861]: I0129 08:18:25.171208 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:25 crc kubenswrapper[4861]: I0129 08:18:25.171364 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:25 crc kubenswrapper[4861]: I0129 08:18:25.377629 4861 scope.go:117] "RemoveContainer" containerID="f45b3750c9bbbaaec2f91bb8088845633261412c9cd375ebd095771f21eb191b" Jan 29 08:18:25 crc kubenswrapper[4861]: I0129 08:18:25.377968 4861 scope.go:117] "RemoveContainer" containerID="fc7cdc751d2d4dfbb2eb222607922529e658592c8aa36e7542c0146777881599" Jan 29 08:18:25 crc kubenswrapper[4861]: E0129 
08:18:25.378228 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-64cfbc9cd6-qrq94_openstack(1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2)\"" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" podUID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2"
Jan 29 08:18:25 crc kubenswrapper[4861]: E0129 08:18:25.378450 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-684c6cff65-gz65z_openstack(ea3eb692-8484-4511-9f14-e3abb700817b)\"" pod="openstack/heat-api-684c6cff65-gz65z" podUID="ea3eb692-8484-4511-9f14-e3abb700817b"
Jan 29 08:18:26 crc kubenswrapper[4861]: I0129 08:18:26.385995 4861 scope.go:117] "RemoveContainer" containerID="f45b3750c9bbbaaec2f91bb8088845633261412c9cd375ebd095771f21eb191b"
Jan 29 08:18:26 crc kubenswrapper[4861]: I0129 08:18:26.386130 4861 scope.go:117] "RemoveContainer" containerID="fc7cdc751d2d4dfbb2eb222607922529e658592c8aa36e7542c0146777881599"
Jan 29 08:18:26 crc kubenswrapper[4861]: E0129 08:18:26.386310 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-64cfbc9cd6-qrq94_openstack(1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2)\"" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" podUID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2"
Jan 29 08:18:26 crc kubenswrapper[4861]: E0129 08:18:26.386353 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-684c6cff65-gz65z_openstack(ea3eb692-8484-4511-9f14-e3abb700817b)\"" pod="openstack/heat-api-684c6cff65-gz65z" podUID="ea3eb692-8484-4511-9f14-e3abb700817b"
Jan 29 08:18:27 crc kubenswrapper[4861]: I0129 08:18:27.932009 4861 scope.go:117] "RemoveContainer" containerID="e5f6edb7def145915388ddf4a4b7fdc44f1081226977ef429fc965ba384abcba"
Jan 29 08:18:27 crc kubenswrapper[4861]: I0129 08:18:27.960104 4861 scope.go:117] "RemoveContainer" containerID="f1993a7dbcc29918446168ebfa5fc73df35235932eb52cab15a0144f50241f03"
Jan 29 08:18:27 crc kubenswrapper[4861]: I0129 08:18:27.983965 4861 scope.go:117] "RemoveContainer" containerID="282a75b799ee1dbe75bd71f4c75d85c603c52882b4b759fa6b4390e90ff6e8c6"
Jan 29 08:18:28 crc kubenswrapper[4861]: I0129 08:18:28.016826 4861 scope.go:117] "RemoveContainer" containerID="9edbd5bdbffa3d969057cc493e8c0acc3077ea2208cbb563b179e3c324002a86"
Jan 29 08:18:28 crc kubenswrapper[4861]: I0129 08:18:28.112344 4861 scope.go:117] "RemoveContainer" containerID="d637967963a5e919f7aeade81fa19b4ecbd898f64b875ea830d9754fcb7a8d62"
Jan 29 08:18:28 crc kubenswrapper[4861]: I0129 08:18:28.350443 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6f4b54cb6-vdr9c" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.126:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.126:8443: connect: connection refused"
Jan 29 08:18:30 crc kubenswrapper[4861]: I0129 08:18:30.629695 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 08:18:30 crc kubenswrapper[4861]: I0129 08:18:30.630133 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 08:18:30 crc kubenswrapper[4861]: I0129 08:18:30.630186 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 08:18:30 crc kubenswrapper[4861]: I0129 08:18:30.631149 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fcca75648caaf02940cb3f4b6284809f6ed607019ae9564066e870fde1b501ad"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 08:18:30 crc kubenswrapper[4861]: I0129 08:18:30.631222 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://fcca75648caaf02940cb3f4b6284809f6ed607019ae9564066e870fde1b501ad" gracePeriod=600
Jan 29 08:18:31 crc kubenswrapper[4861]: I0129 08:18:31.445747 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="fcca75648caaf02940cb3f4b6284809f6ed607019ae9564066e870fde1b501ad" exitCode=0
Jan 29 08:18:31 crc kubenswrapper[4861]: I0129 08:18:31.445866 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"fcca75648caaf02940cb3f4b6284809f6ed607019ae9564066e870fde1b501ad"}
Jan 29 08:18:31 crc kubenswrapper[4861]: I0129 08:18:31.446145 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b"}
Jan 29 08:18:31 crc kubenswrapper[4861]: I0129 08:18:31.446182 4861 scope.go:117] "RemoveContainer" containerID="b89ce2016768cbad3871568854f2f93dfc219e76dad012f457cacf25843d10aa"
Jan 29 08:18:32 crc kubenswrapper[4861]: I0129 08:18:32.627669 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-596f6c779b-zwj48"
Jan 29 08:18:32 crc kubenswrapper[4861]: I0129 08:18:32.643601 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-7dcf9f6d95-mhgt2"
Jan 29 08:18:32 crc kubenswrapper[4861]: I0129 08:18:32.696131 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-668fc5bbd6-8whvv"
Jan 29 08:18:32 crc kubenswrapper[4861]: I0129 08:18:32.739187 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-684c6cff65-gz65z"]
Jan 29 08:18:32 crc kubenswrapper[4861]: I0129 08:18:32.776883 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-64cfbc9cd6-qrq94"]
Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.326174 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-684c6cff65-gz65z"
Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.330963 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94"
Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.421275 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data-custom\") pod \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") "
Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.421422 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-combined-ca-bundle\") pod \"ea3eb692-8484-4511-9f14-e3abb700817b\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") "
Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.421521 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g95p\" (UniqueName: \"kubernetes.io/projected/ea3eb692-8484-4511-9f14-e3abb700817b-kube-api-access-6g95p\") pod \"ea3eb692-8484-4511-9f14-e3abb700817b\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") "
Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.422378 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-combined-ca-bundle\") pod \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") "
Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.422445 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-config-data\") pod \"ea3eb692-8484-4511-9f14-e3abb700817b\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") "
Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.422751 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data\") pod \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") "
Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.422804 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-config-data-custom\") pod \"ea3eb692-8484-4511-9f14-e3abb700817b\" (UID: \"ea3eb692-8484-4511-9f14-e3abb700817b\") "
Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.422859 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk78p\" (UniqueName: \"kubernetes.io/projected/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-kube-api-access-tk78p\") pod \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\" (UID: \"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2\") "
InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.428343 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" (UID: "1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.428348 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-kube-api-access-tk78p" (OuterVolumeSpecName: "kube-api-access-tk78p") pod "1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" (UID: "1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2"). InnerVolumeSpecName "kube-api-access-tk78p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.429750 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea3eb692-8484-4511-9f14-e3abb700817b-kube-api-access-6g95p" (OuterVolumeSpecName: "kube-api-access-6g95p") pod "ea3eb692-8484-4511-9f14-e3abb700817b" (UID: "ea3eb692-8484-4511-9f14-e3abb700817b"). InnerVolumeSpecName "kube-api-access-6g95p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.459137 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ea3eb692-8484-4511-9f14-e3abb700817b" (UID: "ea3eb692-8484-4511-9f14-e3abb700817b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.463335 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" (UID: "1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.479326 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.480421 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-64cfbc9cd6-qrq94" event={"ID":"1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2","Type":"ContainerDied","Data":"e0c79d53053c4784d072ceca2779396ba6b24fb98d6fadf438cf73d7c810ab3f"} Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.480459 4861 scope.go:117] "RemoveContainer" containerID="f45b3750c9bbbaaec2f91bb8088845633261412c9cd375ebd095771f21eb191b" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.484787 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-684c6cff65-gz65z" event={"ID":"ea3eb692-8484-4511-9f14-e3abb700817b","Type":"ContainerDied","Data":"d0ce63df49234ba0cde951489a0bd1f1efa8b160fb2d4b7af366173514d26ff9"} Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.484894 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-684c6cff65-gz65z" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.489453 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data" (OuterVolumeSpecName: "config-data") pod "1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" (UID: "1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.505225 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-config-data" (OuterVolumeSpecName: "config-data") pod "ea3eb692-8484-4511-9f14-e3abb700817b" (UID: "ea3eb692-8484-4511-9f14-e3abb700817b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.525759 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g95p\" (UniqueName: \"kubernetes.io/projected/ea3eb692-8484-4511-9f14-e3abb700817b-kube-api-access-6g95p\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.525959 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.526038 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.526144 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.526251 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.526326 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk78p\" (UniqueName: \"kubernetes.io/projected/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-kube-api-access-tk78p\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.526398 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.526468 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea3eb692-8484-4511-9f14-e3abb700817b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.624195 4861 scope.go:117] "RemoveContainer" containerID="fc7cdc751d2d4dfbb2eb222607922529e658592c8aa36e7542c0146777881599" Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.816784 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-64cfbc9cd6-qrq94"] Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.826164 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/heat-cfnapi-64cfbc9cd6-qrq94"] Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.837119 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-684c6cff65-gz65z"] Jan 29 08:18:33 crc kubenswrapper[4861]: I0129 08:18:33.847310 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-684c6cff65-gz65z"] Jan 29 08:18:35 crc kubenswrapper[4861]: I0129 08:18:35.127634 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" path="/var/lib/kubelet/pods/1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2/volumes" Jan 29 08:18:35 crc kubenswrapper[4861]: I0129 08:18:35.129411 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea3eb692-8484-4511-9f14-e3abb700817b" path="/var/lib/kubelet/pods/ea3eb692-8484-4511-9f14-e3abb700817b/volumes" Jan 29 08:18:38 crc kubenswrapper[4861]: I0129 08:18:38.350701 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6f4b54cb6-vdr9c" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.126:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.126:8443: connect: connection refused" Jan 29 08:18:38 crc kubenswrapper[4861]: I0129 08:18:38.351380 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:18:40 crc kubenswrapper[4861]: I0129 08:18:40.266544 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-bf66d5877-52r4g" Jan 29 08:18:40 crc kubenswrapper[4861]: I0129 08:18:40.325155 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-7dcf9f6d95-mhgt2"] Jan 29 08:18:40 crc kubenswrapper[4861]: I0129 08:18:40.326298 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-7dcf9f6d95-mhgt2" podUID="11f09089-193b-44e3-81c6-fd841caf0812" containerName="heat-engine" containerID="cri-o://4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7" gracePeriod=60 Jan 29 08:18:42 crc kubenswrapper[4861]: E0129 08:18:42.605312 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 29 08:18:42 crc kubenswrapper[4861]: E0129 08:18:42.610427 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 29 08:18:42 crc kubenswrapper[4861]: E0129 08:18:42.612037 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 29 08:18:42 crc kubenswrapper[4861]: E0129 08:18:42.612102 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
probeType="Readiness" pod="openstack/heat-engine-7dcf9f6d95-mhgt2" podUID="11f09089-193b-44e3-81c6-fd841caf0812" containerName="heat-engine" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.358360 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.416006 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-secret-key\") pod \"c569ed14-e911-4ead-ada8-270f32a1297f\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.416050 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-combined-ca-bundle\") pod \"c569ed14-e911-4ead-ada8-270f32a1297f\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.416114 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-config-data\") pod \"c569ed14-e911-4ead-ada8-270f32a1297f\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.416160 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hgnjm\" (UniqueName: \"kubernetes.io/projected/c569ed14-e911-4ead-ada8-270f32a1297f-kube-api-access-hgnjm\") pod \"c569ed14-e911-4ead-ada8-270f32a1297f\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.416237 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-tls-certs\") pod \"c569ed14-e911-4ead-ada8-270f32a1297f\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.416263 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-scripts\") pod \"c569ed14-e911-4ead-ada8-270f32a1297f\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.416369 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c569ed14-e911-4ead-ada8-270f32a1297f-logs\") pod \"c569ed14-e911-4ead-ada8-270f32a1297f\" (UID: \"c569ed14-e911-4ead-ada8-270f32a1297f\") " Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.419028 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c569ed14-e911-4ead-ada8-270f32a1297f-logs" (OuterVolumeSpecName: "logs") pod "c569ed14-e911-4ead-ada8-270f32a1297f" (UID: "c569ed14-e911-4ead-ada8-270f32a1297f"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.451895 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "c569ed14-e911-4ead-ada8-270f32a1297f" (UID: "c569ed14-e911-4ead-ada8-270f32a1297f"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.478219 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-config-data" (OuterVolumeSpecName: "config-data") pod "c569ed14-e911-4ead-ada8-270f32a1297f" (UID: "c569ed14-e911-4ead-ada8-270f32a1297f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.493288 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c569ed14-e911-4ead-ada8-270f32a1297f" (UID: "c569ed14-e911-4ead-ada8-270f32a1297f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.493567 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c569ed14-e911-4ead-ada8-270f32a1297f-kube-api-access-hgnjm" (OuterVolumeSpecName: "kube-api-access-hgnjm") pod "c569ed14-e911-4ead-ada8-270f32a1297f" (UID: "c569ed14-e911-4ead-ada8-270f32a1297f"). InnerVolumeSpecName "kube-api-access-hgnjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.499389 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-scripts" (OuterVolumeSpecName: "scripts") pod "c569ed14-e911-4ead-ada8-270f32a1297f" (UID: "c569ed14-e911-4ead-ada8-270f32a1297f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.518534 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.518575 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hgnjm\" (UniqueName: \"kubernetes.io/projected/c569ed14-e911-4ead-ada8-270f32a1297f-kube-api-access-hgnjm\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.518587 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c569ed14-e911-4ead-ada8-270f32a1297f-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.518595 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c569ed14-e911-4ead-ada8-270f32a1297f-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.518605 4861 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.518614 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.522192 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "c569ed14-e911-4ead-ada8-270f32a1297f" (UID: "c569ed14-e911-4ead-ada8-270f32a1297f"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.620893 4861 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/c569ed14-e911-4ead-ada8-270f32a1297f-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.632527 4861 generic.go:334] "Generic (PLEG): container finished" podID="c569ed14-e911-4ead-ada8-270f32a1297f" containerID="eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027" exitCode=137 Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.632607 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f4b54cb6-vdr9c" event={"ID":"c569ed14-e911-4ead-ada8-270f32a1297f","Type":"ContainerDied","Data":"eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027"} Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.632638 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6f4b54cb6-vdr9c" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.632670 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f4b54cb6-vdr9c" event={"ID":"c569ed14-e911-4ead-ada8-270f32a1297f","Type":"ContainerDied","Data":"a4675aa26f63c7e2e85820ff1031d541be92d709e05413f2eb5d2f5b3795da6a"} Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.632689 4861 scope.go:117] "RemoveContainer" containerID="28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.673359 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6f4b54cb6-vdr9c"] Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.682824 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6f4b54cb6-vdr9c"] Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.808297 4861 scope.go:117] "RemoveContainer" containerID="eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.828122 4861 scope.go:117] "RemoveContainer" containerID="28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec" Jan 29 08:18:47 crc kubenswrapper[4861]: E0129 08:18:47.828640 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec\": container with ID starting with 28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec not found: ID does not exist" containerID="28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.828683 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec"} err="failed to get container status \"28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec\": rpc error: code = NotFound desc = could not find container \"28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec\": container with ID starting with 28e31b4c5da5678752a3d70694a74d2a497f26322cef56eedef5f5a477671bec not found: ID does not exist" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.828708 4861 scope.go:117] "RemoveContainer" containerID="eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027" Jan 29 08:18:47 crc kubenswrapper[4861]: E0129 08:18:47.829041 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027\": container with ID starting with eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027 not found: ID does not exist" containerID="eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027" Jan 29 08:18:47 crc kubenswrapper[4861]: I0129 08:18:47.829087 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027"} err="failed to get container status \"eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027\": rpc error: code = NotFound desc = could not find container \"eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027\": container with ID starting with eebd01901fe7be121c4e8d73e209eb75d0ea988b383d6ab8f1f6da1cfe5e5027 not found: ID does not exist" Jan 29 08:18:49 crc 
kubenswrapper[4861]: I0129 08:18:49.131482 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" path="/var/lib/kubelet/pods/c569ed14-e911-4ead-ada8-270f32a1297f/volumes" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.229012 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.357418 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data-custom\") pod \"11f09089-193b-44e3-81c6-fd841caf0812\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.357550 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvbss\" (UniqueName: \"kubernetes.io/projected/11f09089-193b-44e3-81c6-fd841caf0812-kube-api-access-tvbss\") pod \"11f09089-193b-44e3-81c6-fd841caf0812\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.357593 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-combined-ca-bundle\") pod \"11f09089-193b-44e3-81c6-fd841caf0812\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.357641 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data\") pod \"11f09089-193b-44e3-81c6-fd841caf0812\" (UID: \"11f09089-193b-44e3-81c6-fd841caf0812\") " Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.363224 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11f09089-193b-44e3-81c6-fd841caf0812-kube-api-access-tvbss" (OuterVolumeSpecName: "kube-api-access-tvbss") pod "11f09089-193b-44e3-81c6-fd841caf0812" (UID: "11f09089-193b-44e3-81c6-fd841caf0812"). InnerVolumeSpecName "kube-api-access-tvbss". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.363825 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "11f09089-193b-44e3-81c6-fd841caf0812" (UID: "11f09089-193b-44e3-81c6-fd841caf0812"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.385031 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "11f09089-193b-44e3-81c6-fd841caf0812" (UID: "11f09089-193b-44e3-81c6-fd841caf0812"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.437815 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data" (OuterVolumeSpecName: "config-data") pod "11f09089-193b-44e3-81c6-fd841caf0812" (UID: "11f09089-193b-44e3-81c6-fd841caf0812"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.459957 4861 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.459994 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvbss\" (UniqueName: \"kubernetes.io/projected/11f09089-193b-44e3-81c6-fd841caf0812-kube-api-access-tvbss\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.460009 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.460023 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11f09089-193b-44e3-81c6-fd841caf0812-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.691942 4861 generic.go:334] "Generic (PLEG): container finished" podID="11f09089-193b-44e3-81c6-fd841caf0812" containerID="4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7" exitCode=0 Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.692030 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7dcf9f6d95-mhgt2" event={"ID":"11f09089-193b-44e3-81c6-fd841caf0812","Type":"ContainerDied","Data":"4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7"} Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.692341 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7dcf9f6d95-mhgt2" event={"ID":"11f09089-193b-44e3-81c6-fd841caf0812","Type":"ContainerDied","Data":"118003bf458a30060cfe82e6aed5097306ed751272ff459d631260d924988b06"} Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.692368 4861 scope.go:117] "RemoveContainer" containerID="4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.692055 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-7dcf9f6d95-mhgt2" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.725001 4861 scope.go:117] "RemoveContainer" containerID="4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7" Jan 29 08:18:52 crc kubenswrapper[4861]: E0129 08:18:52.725522 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7\": container with ID starting with 4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7 not found: ID does not exist" containerID="4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.725572 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7"} err="failed to get container status \"4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7\": rpc error: code = NotFound desc = could not find container \"4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7\": container with ID starting with 4ef74793c240f66ea748e3411b364e6c93f37796fa8c34ad0877e5c8541b2ba7 not found: ID does not exist" Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.726288 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-7dcf9f6d95-mhgt2"] Jan 29 08:18:52 crc kubenswrapper[4861]: I0129 08:18:52.735318 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-7dcf9f6d95-mhgt2"] Jan 29 08:18:53 crc kubenswrapper[4861]: I0129 08:18:53.132882 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11f09089-193b-44e3-81c6-fd841caf0812" path="/var/lib/kubelet/pods/11f09089-193b-44e3-81c6-fd841caf0812/volumes" Jan 29 08:18:57 crc kubenswrapper[4861]: I0129 08:18:57.041813 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-n79dl"] Jan 29 08:18:57 crc kubenswrapper[4861]: I0129 08:18:57.049789 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-8618-account-create-update-v8qq4"] Jan 29 08:18:57 crc kubenswrapper[4861]: I0129 08:18:57.063634 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-n79dl"] Jan 29 08:18:57 crc kubenswrapper[4861]: I0129 08:18:57.076446 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-8618-account-create-update-v8qq4"] Jan 29 08:18:57 crc kubenswrapper[4861]: I0129 08:18:57.135538 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4821b164-e444-4c00-a965-0bb0d722b944" path="/var/lib/kubelet/pods/4821b164-e444-4c00-a965-0bb0d722b944/volumes" Jan 29 08:18:57 crc kubenswrapper[4861]: I0129 08:18:57.138709 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="485df95d-da0f-498c-9d95-38dfe0c8be8c" path="/var/lib/kubelet/pods/485df95d-da0f-498c-9d95-38dfe0c8be8c/volumes" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.190678 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn"] Jan 29 08:18:59 crc kubenswrapper[4861]: E0129 08:18:59.191431 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea3eb692-8484-4511-9f14-e3abb700817b" containerName="heat-api" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191451 4861 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="ea3eb692-8484-4511-9f14-e3abb700817b" containerName="heat-api" Jan 29 08:18:59 crc kubenswrapper[4861]: E0129 08:18:59.191466 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon-log" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191476 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon-log" Jan 29 08:18:59 crc kubenswrapper[4861]: E0129 08:18:59.191509 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191521 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon" Jan 29 08:18:59 crc kubenswrapper[4861]: E0129 08:18:59.191539 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11f09089-193b-44e3-81c6-fd841caf0812" containerName="heat-engine" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191547 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="11f09089-193b-44e3-81c6-fd841caf0812" containerName="heat-engine" Jan 29 08:18:59 crc kubenswrapper[4861]: E0129 08:18:59.191568 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" containerName="heat-cfnapi" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191575 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" containerName="heat-cfnapi" Jan 29 08:18:59 crc kubenswrapper[4861]: E0129 08:18:59.191587 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c536edb-e99b-463f-918e-ef03f76c504c" containerName="heat-cfnapi" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191595 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c536edb-e99b-463f-918e-ef03f76c504c" containerName="heat-cfnapi" Jan 29 08:18:59 crc kubenswrapper[4861]: E0129 08:18:59.191624 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f8e1690-060a-4f86-90f5-52f9f094a8ae" containerName="heat-api" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191632 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f8e1690-060a-4f86-90f5-52f9f094a8ae" containerName="heat-api" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191851 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" containerName="heat-cfnapi" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191880 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea3eb692-8484-4511-9f14-e3abb700817b" containerName="heat-api" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191897 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea3eb692-8484-4511-9f14-e3abb700817b" containerName="heat-api" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191919 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon-log" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191934 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c569ed14-e911-4ead-ada8-270f32a1297f" containerName="horizon" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191950 4861 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="11f09089-193b-44e3-81c6-fd841caf0812" containerName="heat-engine" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191970 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c536edb-e99b-463f-918e-ef03f76c504c" containerName="heat-cfnapi" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.191986 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f8e1690-060a-4f86-90f5-52f9f094a8ae" containerName="heat-api" Jan 29 08:18:59 crc kubenswrapper[4861]: E0129 08:18:59.192286 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea3eb692-8484-4511-9f14-e3abb700817b" containerName="heat-api" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.192302 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea3eb692-8484-4511-9f14-e3abb700817b" containerName="heat-api" Jan 29 08:18:59 crc kubenswrapper[4861]: E0129 08:18:59.192341 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" containerName="heat-cfnapi" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.192352 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" containerName="heat-cfnapi" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.192640 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a785b01-0bd4-4bdc-ad9d-24c3e931b6e2" containerName="heat-cfnapi" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.194602 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.197813 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.204787 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.204916 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.204984 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmm9h\" (UniqueName: \"kubernetes.io/projected/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-kube-api-access-wmm9h\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn" Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.217512 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn"] Jan 29 08:18:59 crc kubenswrapper[4861]: 
Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.308454 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn"
Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.308550 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn"
Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.308613 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmm9h\" (UniqueName: \"kubernetes.io/projected/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-kube-api-access-wmm9h\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn"
Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.308987 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn"
Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.309304 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn"
Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.338271 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmm9h\" (UniqueName: \"kubernetes.io/projected/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-kube-api-access-wmm9h\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn"
Jan 29 08:18:59 crc kubenswrapper[4861]: I0129 08:18:59.520110 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn"
Jan 29 08:19:00 crc kubenswrapper[4861]: W0129 08:19:00.007016 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79bf7eee_2a3f_44b1_bcb0_1d1b356ed410.slice/crio-1a47f6bb26b4f87fdff56a8bdeaf8cc182177f78075daa71079f4dbe90954b8a WatchSource:0}: Error finding container 1a47f6bb26b4f87fdff56a8bdeaf8cc182177f78075daa71079f4dbe90954b8a: Status 404 returned error can't find the container with id 1a47f6bb26b4f87fdff56a8bdeaf8cc182177f78075daa71079f4dbe90954b8a
Jan 29 08:19:00 crc kubenswrapper[4861]: I0129 08:19:00.022869 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn"]
Jan 29 08:19:00 crc kubenswrapper[4861]: I0129 08:19:00.817194 4861 generic.go:334] "Generic (PLEG): container finished" podID="79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" containerID="ee1511ca2b97a55ff01160629619f30943e4ae47302ce1731b257965e063649b" exitCode=0
Jan 29 08:19:00 crc kubenswrapper[4861]: I0129 08:19:00.817332 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn" event={"ID":"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410","Type":"ContainerDied","Data":"ee1511ca2b97a55ff01160629619f30943e4ae47302ce1731b257965e063649b"}
Jan 29 08:19:00 crc kubenswrapper[4861]: I0129 08:19:00.820498 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn" event={"ID":"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410","Type":"ContainerStarted","Data":"1a47f6bb26b4f87fdff56a8bdeaf8cc182177f78075daa71079f4dbe90954b8a"}
Jan 29 08:19:02 crc kubenswrapper[4861]: I0129 08:19:02.840997 4861 generic.go:334] "Generic (PLEG): container finished" podID="79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" containerID="8a1bcc416ea66a3c4f2ebc59010f49db9acb485cffd3c9ce94c4bc74f278d999" exitCode=0
Jan 29 08:19:02 crc kubenswrapper[4861]: I0129 08:19:02.841131 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn" event={"ID":"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410","Type":"ContainerDied","Data":"8a1bcc416ea66a3c4f2ebc59010f49db9acb485cffd3c9ce94c4bc74f278d999"}
Jan 29 08:19:03 crc kubenswrapper[4861]: I0129 08:19:03.852116 4861 generic.go:334] "Generic (PLEG): container finished" podID="79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" containerID="64bdeb57252b53ea3c80e5016c053e3d0714da88a915002de71894fb2d68c3aa" exitCode=0
Jan 29 08:19:03 crc kubenswrapper[4861]: I0129 08:19:03.852205 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn" event={"ID":"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410","Type":"ContainerDied","Data":"64bdeb57252b53ea3c80e5016c053e3d0714da88a915002de71894fb2d68c3aa"}
Jan 29 08:19:04 crc kubenswrapper[4861]: I0129 08:19:04.064825 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-4zgn5"]
Jan 29 08:19:04 crc kubenswrapper[4861]: I0129 08:19:04.076232 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-4zgn5"]
Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.131914 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f79a7f94-15e1-49ff-a383-0ea07b714dd2" path="/var/lib/kubelet/pods/f79a7f94-15e1-49ff-a383-0ea07b714dd2/volumes"
Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.241196 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn"
Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.443939 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-bundle\") pod \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") "
Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.444201 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmm9h\" (UniqueName: \"kubernetes.io/projected/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-kube-api-access-wmm9h\") pod \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") "
Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.444297 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-util\") pod \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\" (UID: \"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410\") "
Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.448608 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-bundle" (OuterVolumeSpecName: "bundle") pod "79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" (UID: "79bf7eee-2a3f-44b1-bcb0-1d1b356ed410"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.460462 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-util" (OuterVolumeSpecName: "util") pod "79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" (UID: "79bf7eee-2a3f-44b1-bcb0-1d1b356ed410"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.546671 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmm9h\" (UniqueName: \"kubernetes.io/projected/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-kube-api-access-wmm9h\") on node \"crc\" DevicePath \"\"" Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.546720 4861 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-util\") on node \"crc\" DevicePath \"\"" Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.546733 4861 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/79bf7eee-2a3f-44b1-bcb0-1d1b356ed410-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.889597 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn" event={"ID":"79bf7eee-2a3f-44b1-bcb0-1d1b356ed410","Type":"ContainerDied","Data":"1a47f6bb26b4f87fdff56a8bdeaf8cc182177f78075daa71079f4dbe90954b8a"} Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.890151 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a47f6bb26b4f87fdff56a8bdeaf8cc182177f78075daa71079f4dbe90954b8a" Jan 29 08:19:05 crc kubenswrapper[4861]: I0129 08:19:05.889713 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.690894 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-w7lll"] Jan 29 08:19:17 crc kubenswrapper[4861]: E0129 08:19:17.691655 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" containerName="util" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.691669 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" containerName="util" Jan 29 08:19:17 crc kubenswrapper[4861]: E0129 08:19:17.691688 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" containerName="pull" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.691693 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" containerName="pull" Jan 29 08:19:17 crc kubenswrapper[4861]: E0129 08:19:17.691721 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" containerName="extract" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.691728 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" containerName="extract" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.691903 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="79bf7eee-2a3f-44b1-bcb0-1d1b356ed410" containerName="extract" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.692539 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-w7lll" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.694034 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-49l7g" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.695689 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.695959 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.719660 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-w7lll"] Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.722762 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lcvq\" (UniqueName: \"kubernetes.io/projected/93f09c4e-4102-44dd-b65f-3aa0088f14f0-kube-api-access-8lcvq\") pod \"obo-prometheus-operator-68bc856cb9-w7lll\" (UID: \"93f09c4e-4102-44dd-b65f-3aa0088f14f0\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-w7lll" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.812718 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp"] Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.814010 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.823291 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-sp7kt" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.823485 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.824730 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lcvq\" (UniqueName: \"kubernetes.io/projected/93f09c4e-4102-44dd-b65f-3aa0088f14f0-kube-api-access-8lcvq\") pod \"obo-prometheus-operator-68bc856cb9-w7lll\" (UID: \"93f09c4e-4102-44dd-b65f-3aa0088f14f0\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-w7lll" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.839374 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd"] Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.840699 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.853883 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp"] Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.860835 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lcvq\" (UniqueName: \"kubernetes.io/projected/93f09c4e-4102-44dd-b65f-3aa0088f14f0-kube-api-access-8lcvq\") pod \"obo-prometheus-operator-68bc856cb9-w7lll\" (UID: \"93f09c4e-4102-44dd-b65f-3aa0088f14f0\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-w7lll" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.864612 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd"] Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.926212 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/923475bd-8c9e-4426-b3f5-d632cc1bd3a6-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd\" (UID: \"923475bd-8c9e-4426-b3f5-d632cc1bd3a6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.926558 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/14b0386e-4bd1-484e-a955-b8d3e390529f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp\" (UID: \"14b0386e-4bd1-484e-a955-b8d3e390529f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.926660 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/923475bd-8c9e-4426-b3f5-d632cc1bd3a6-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd\" (UID: \"923475bd-8c9e-4426-b3f5-d632cc1bd3a6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd" Jan 29 08:19:17 crc kubenswrapper[4861]: I0129 08:19:17.926832 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/14b0386e-4bd1-484e-a955-b8d3e390529f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp\" (UID: \"14b0386e-4bd1-484e-a955-b8d3e390529f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.013050 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-pfrzn"] Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.014849 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-w7lll" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.017298 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.020206 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-d62c8" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.022110 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.028442 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/923475bd-8c9e-4426-b3f5-d632cc1bd3a6-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd\" (UID: \"923475bd-8c9e-4426-b3f5-d632cc1bd3a6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.028506 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/33374b3a-fbdd-4fc7-8743-a8f979e12ee8-observability-operator-tls\") pod \"observability-operator-59bdc8b94-pfrzn\" (UID: \"33374b3a-fbdd-4fc7-8743-a8f979e12ee8\") " pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.028550 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/14b0386e-4bd1-484e-a955-b8d3e390529f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp\" (UID: \"14b0386e-4bd1-484e-a955-b8d3e390529f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.028602 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/923475bd-8c9e-4426-b3f5-d632cc1bd3a6-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd\" (UID: \"923475bd-8c9e-4426-b3f5-d632cc1bd3a6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.028664 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztfr2\" (UniqueName: \"kubernetes.io/projected/33374b3a-fbdd-4fc7-8743-a8f979e12ee8-kube-api-access-ztfr2\") pod \"observability-operator-59bdc8b94-pfrzn\" (UID: \"33374b3a-fbdd-4fc7-8743-a8f979e12ee8\") " pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.028717 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/14b0386e-4bd1-484e-a955-b8d3e390529f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp\" (UID: \"14b0386e-4bd1-484e-a955-b8d3e390529f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.032673 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/14b0386e-4bd1-484e-a955-b8d3e390529f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp\" (UID: 
\"14b0386e-4bd1-484e-a955-b8d3e390529f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.032690 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/923475bd-8c9e-4426-b3f5-d632cc1bd3a6-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd\" (UID: \"923475bd-8c9e-4426-b3f5-d632cc1bd3a6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.035538 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/14b0386e-4bd1-484e-a955-b8d3e390529f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp\" (UID: \"14b0386e-4bd1-484e-a955-b8d3e390529f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.045494 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-pfrzn"] Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.059436 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/923475bd-8c9e-4426-b3f5-d632cc1bd3a6-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd\" (UID: \"923475bd-8c9e-4426-b3f5-d632cc1bd3a6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.130333 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/33374b3a-fbdd-4fc7-8743-a8f979e12ee8-observability-operator-tls\") pod \"observability-operator-59bdc8b94-pfrzn\" (UID: \"33374b3a-fbdd-4fc7-8743-a8f979e12ee8\") " pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.130460 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztfr2\" (UniqueName: \"kubernetes.io/projected/33374b3a-fbdd-4fc7-8743-a8f979e12ee8-kube-api-access-ztfr2\") pod \"observability-operator-59bdc8b94-pfrzn\" (UID: \"33374b3a-fbdd-4fc7-8743-a8f979e12ee8\") " pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.134797 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/33374b3a-fbdd-4fc7-8743-a8f979e12ee8-observability-operator-tls\") pod \"observability-operator-59bdc8b94-pfrzn\" (UID: \"33374b3a-fbdd-4fc7-8743-a8f979e12ee8\") " pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.145676 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.146818 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztfr2\" (UniqueName: \"kubernetes.io/projected/33374b3a-fbdd-4fc7-8743-a8f979e12ee8-kube-api-access-ztfr2\") pod \"observability-operator-59bdc8b94-pfrzn\" (UID: \"33374b3a-fbdd-4fc7-8743-a8f979e12ee8\") " pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.237210 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.255829 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-flrtn"] Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.257271 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-flrtn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.262492 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-nrtfv" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.264478 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-flrtn"] Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.338422 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/fbda6774-02b4-4961-bc5e-8f022a7ca584-openshift-service-ca\") pod \"perses-operator-5bf474d74f-flrtn\" (UID: \"fbda6774-02b4-4961-bc5e-8f022a7ca584\") " pod="openshift-operators/perses-operator-5bf474d74f-flrtn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.338473 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6hxn\" (UniqueName: \"kubernetes.io/projected/fbda6774-02b4-4961-bc5e-8f022a7ca584-kube-api-access-z6hxn\") pod \"perses-operator-5bf474d74f-flrtn\" (UID: \"fbda6774-02b4-4961-bc5e-8f022a7ca584\") " pod="openshift-operators/perses-operator-5bf474d74f-flrtn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.363847 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.442316 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/fbda6774-02b4-4961-bc5e-8f022a7ca584-openshift-service-ca\") pod \"perses-operator-5bf474d74f-flrtn\" (UID: \"fbda6774-02b4-4961-bc5e-8f022a7ca584\") " pod="openshift-operators/perses-operator-5bf474d74f-flrtn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.442551 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6hxn\" (UniqueName: \"kubernetes.io/projected/fbda6774-02b4-4961-bc5e-8f022a7ca584-kube-api-access-z6hxn\") pod \"perses-operator-5bf474d74f-flrtn\" (UID: \"fbda6774-02b4-4961-bc5e-8f022a7ca584\") " pod="openshift-operators/perses-operator-5bf474d74f-flrtn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.443854 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/fbda6774-02b4-4961-bc5e-8f022a7ca584-openshift-service-ca\") pod \"perses-operator-5bf474d74f-flrtn\" (UID: \"fbda6774-02b4-4961-bc5e-8f022a7ca584\") " pod="openshift-operators/perses-operator-5bf474d74f-flrtn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.476237 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6hxn\" (UniqueName: \"kubernetes.io/projected/fbda6774-02b4-4961-bc5e-8f022a7ca584-kube-api-access-z6hxn\") pod \"perses-operator-5bf474d74f-flrtn\" (UID: \"fbda6774-02b4-4961-bc5e-8f022a7ca584\") " pod="openshift-operators/perses-operator-5bf474d74f-flrtn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.635325 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-flrtn" Jan 29 08:19:18 crc kubenswrapper[4861]: I0129 08:19:18.760024 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-w7lll"] Jan 29 08:19:19 crc kubenswrapper[4861]: I0129 08:19:19.030327 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-w7lll" event={"ID":"93f09c4e-4102-44dd-b65f-3aa0088f14f0","Type":"ContainerStarted","Data":"f351d6f1f9afeeb9bc22173237cd41ebf7fd3b9ed4eabfa6edfebc36c5d984b2"} Jan 29 08:19:19 crc kubenswrapper[4861]: I0129 08:19:19.069720 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd"] Jan 29 08:19:19 crc kubenswrapper[4861]: I0129 08:19:19.106583 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp"] Jan 29 08:19:19 crc kubenswrapper[4861]: W0129 08:19:19.355865 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33374b3a_fbdd_4fc7_8743_a8f979e12ee8.slice/crio-4e059aaea906bc79de5e97be966a6208a7f729419eb87251b0df8710236391d7 WatchSource:0}: Error finding container 4e059aaea906bc79de5e97be966a6208a7f729419eb87251b0df8710236391d7: Status 404 returned error can't find the container with id 4e059aaea906bc79de5e97be966a6208a7f729419eb87251b0df8710236391d7 Jan 29 08:19:19 crc kubenswrapper[4861]: I0129 08:19:19.371557 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-pfrzn"] Jan 29 08:19:19 crc kubenswrapper[4861]: W0129 08:19:19.454734 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfbda6774_02b4_4961_bc5e_8f022a7ca584.slice/crio-0999e2f5a67970922bc8a97df011088fa573b5246d85cb804a1f1220034ffd00 WatchSource:0}: Error finding container 0999e2f5a67970922bc8a97df011088fa573b5246d85cb804a1f1220034ffd00: Status 404 returned error can't find the container with id 0999e2f5a67970922bc8a97df011088fa573b5246d85cb804a1f1220034ffd00 Jan 29 08:19:19 crc kubenswrapper[4861]: I0129 08:19:19.477268 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-flrtn"] Jan 29 08:19:20 crc kubenswrapper[4861]: I0129 08:19:20.086205 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp" event={"ID":"14b0386e-4bd1-484e-a955-b8d3e390529f","Type":"ContainerStarted","Data":"6fffa37c3ff3be526857935a294fec5ce086929caf3e2f2b9fec8e6c1e34467c"} Jan 29 08:19:20 crc kubenswrapper[4861]: I0129 08:19:20.102600 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd" event={"ID":"923475bd-8c9e-4426-b3f5-d632cc1bd3a6","Type":"ContainerStarted","Data":"b822fb4f9152e86eeded4c814911f701f2052982fef6c21177b204f6297c64a8"} Jan 29 08:19:20 crc kubenswrapper[4861]: I0129 08:19:20.145171 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" event={"ID":"33374b3a-fbdd-4fc7-8743-a8f979e12ee8","Type":"ContainerStarted","Data":"4e059aaea906bc79de5e97be966a6208a7f729419eb87251b0df8710236391d7"} Jan 29 08:19:20 crc kubenswrapper[4861]: I0129 
08:19:20.148210 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-flrtn" event={"ID":"fbda6774-02b4-4961-bc5e-8f022a7ca584","Type":"ContainerStarted","Data":"0999e2f5a67970922bc8a97df011088fa573b5246d85cb804a1f1220034ffd00"} Jan 29 08:19:28 crc kubenswrapper[4861]: I0129 08:19:28.314874 4861 scope.go:117] "RemoveContainer" containerID="14acc896d89d6c0dc0f7622d5095819283636e42c74d015ce4b693d5d7e8beeb" Jan 29 08:19:29 crc kubenswrapper[4861]: I0129 08:19:29.260856 4861 scope.go:117] "RemoveContainer" containerID="62e32f40c16d8219cbef5c6fd50a144ec6d45dc98c52d2903d81028aab099d72" Jan 29 08:19:29 crc kubenswrapper[4861]: I0129 08:19:29.303633 4861 scope.go:117] "RemoveContainer" containerID="72665c0fcf836acd6d33be965132f4888628d63adf4be4a5dbe3553f64331d40" Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.311447 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-flrtn" event={"ID":"fbda6774-02b4-4961-bc5e-8f022a7ca584","Type":"ContainerStarted","Data":"8a3be87b8a9a868478961808f1242149f0bdcb54d984661a6212101f3013e77d"} Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.311815 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-flrtn" Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.319146 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp" event={"ID":"14b0386e-4bd1-484e-a955-b8d3e390529f","Type":"ContainerStarted","Data":"5c1bd2fa3a8aa6155cabdb7822ac754b4b8bce9aba0e22fb6733b99f5c5a83af"} Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.332615 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd" event={"ID":"923475bd-8c9e-4426-b3f5-d632cc1bd3a6","Type":"ContainerStarted","Data":"13380c77a030691b36d04ce1ea07eba5c0e866477499ed104170da0e8ab7a6af"} Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.341967 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-flrtn" podStartSLOduration=2.538375501 podStartE2EDuration="12.341949924s" podCreationTimestamp="2026-01-29 08:19:18 +0000 UTC" firstStartedPulling="2026-01-29 08:19:19.457203929 +0000 UTC m=+6251.128698486" lastFinishedPulling="2026-01-29 08:19:29.260778352 +0000 UTC m=+6260.932272909" observedRunningTime="2026-01-29 08:19:30.335446733 +0000 UTC m=+6262.006941300" watchObservedRunningTime="2026-01-29 08:19:30.341949924 +0000 UTC m=+6262.013444481" Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.348676 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" event={"ID":"33374b3a-fbdd-4fc7-8743-a8f979e12ee8","Type":"ContainerStarted","Data":"093057eba5936e6f3b3d5dd140ce9636db18c94de6630047c4b970f2cc2861ec"} Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.349139 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.350596 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.362569 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
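
The two manager.go warnings above fire when the cAdvisor watch sees a new crio-<id> cgroup before the runtime can answer for that container; the same 64-hex IDs then arrive moments later in PLEG ContainerStarted events, which is what marks the 404s as benign. A correlation sketch under that assumption (both regexes are illustrative, not kubelet code):

package main

import (
	"fmt"
	"regexp"
)

// Pull the 64-hex container ID out of a cgroup watch warning and out of a
// PLEG ContainerStarted event, then check they refer to the same container.
var (
	cgroupID = regexp.MustCompile(`crio-([0-9a-f]{64})`)
	plegID   = regexp.MustCompile(`"Type":"ContainerStarted","Data":"([0-9a-f]{64})"`)
)

func main() {
	warn := `Failed to process watch event ... crio-4e059aaea906bc79de5e97be966a6208a7f729419eb87251b0df8710236391d7 WatchSource:0}: ...`
	pleg := `event={"ID":"33374b3a-fbdd-4fc7-8743-a8f979e12ee8","Type":"ContainerStarted","Data":"4e059aaea906bc79de5e97be966a6208a7f729419eb87251b0df8710236391d7"}`
	w, p := cgroupID.FindStringSubmatch(warn), plegID.FindStringSubmatch(pleg)
	if w != nil && p != nil && w[1] == p[1] {
		fmt.Println("benign race: container", w[1][:12], "reported started after the 404")
	}
}
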
pod="openshift-operators/obo-prometheus-operator-68bc856cb9-w7lll" event={"ID":"93f09c4e-4102-44dd-b65f-3aa0088f14f0","Type":"ContainerStarted","Data":"489012f3ac8ea4ba73e7a272f304b0ae2db12b79263a0b4d2380aa36ee5ea315"} Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.369974 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd" podStartSLOduration=3.134269775 podStartE2EDuration="13.369954991s" podCreationTimestamp="2026-01-29 08:19:17 +0000 UTC" firstStartedPulling="2026-01-29 08:19:19.085670909 +0000 UTC m=+6250.757165466" lastFinishedPulling="2026-01-29 08:19:29.321356125 +0000 UTC m=+6260.992850682" observedRunningTime="2026-01-29 08:19:30.369127569 +0000 UTC m=+6262.040622146" watchObservedRunningTime="2026-01-29 08:19:30.369954991 +0000 UTC m=+6262.041449548" Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.405415 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp" podStartSLOduration=3.259360764 podStartE2EDuration="13.405394783s" podCreationTimestamp="2026-01-29 08:19:17 +0000 UTC" firstStartedPulling="2026-01-29 08:19:19.114822445 +0000 UTC m=+6250.786317002" lastFinishedPulling="2026-01-29 08:19:29.260856444 +0000 UTC m=+6260.932351021" observedRunningTime="2026-01-29 08:19:30.400385361 +0000 UTC m=+6262.071879918" watchObservedRunningTime="2026-01-29 08:19:30.405394783 +0000 UTC m=+6262.076889340" Jan 29 08:19:30 crc kubenswrapper[4861]: I0129 08:19:30.497688 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-pfrzn" podStartSLOduration=3.53712434 podStartE2EDuration="13.49766443s" podCreationTimestamp="2026-01-29 08:19:17 +0000 UTC" firstStartedPulling="2026-01-29 08:19:19.360814195 +0000 UTC m=+6251.032308752" lastFinishedPulling="2026-01-29 08:19:29.321354285 +0000 UTC m=+6260.992848842" observedRunningTime="2026-01-29 08:19:30.488473518 +0000 UTC m=+6262.159968075" watchObservedRunningTime="2026-01-29 08:19:30.49766443 +0000 UTC m=+6262.169158987" Jan 29 08:19:38 crc kubenswrapper[4861]: I0129 08:19:38.639631 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-flrtn" Jan 29 08:19:38 crc kubenswrapper[4861]: I0129 08:19:38.660950 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-w7lll" podStartSLOduration=11.196510242 podStartE2EDuration="21.660931574s" podCreationTimestamp="2026-01-29 08:19:17 +0000 UTC" firstStartedPulling="2026-01-29 08:19:18.796432212 +0000 UTC m=+6250.467926769" lastFinishedPulling="2026-01-29 08:19:29.260853534 +0000 UTC m=+6260.932348101" observedRunningTime="2026-01-29 08:19:30.550373716 +0000 UTC m=+6262.221868283" watchObservedRunningTime="2026-01-29 08:19:38.660931574 +0000 UTC m=+6270.332426141" Jan 29 08:19:41 crc kubenswrapper[4861]: I0129 08:19:41.953450 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 29 08:19:41 crc kubenswrapper[4861]: I0129 08:19:41.954056 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="9ce50e42-2140-459d-9bfe-59c23bcb8ae2" containerName="openstackclient" containerID="cri-o://a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54" gracePeriod=2 Jan 29 
08:19:41 crc kubenswrapper[4861]: I0129 08:19:41.982101 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.019287 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 29 08:19:42 crc kubenswrapper[4861]: E0129 08:19:42.019887 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ce50e42-2140-459d-9bfe-59c23bcb8ae2" containerName="openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.019953 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ce50e42-2140-459d-9bfe-59c23bcb8ae2" containerName="openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.020221 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ce50e42-2140-459d-9bfe-59c23bcb8ae2" containerName="openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.020952 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.023816 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="9ce50e42-2140-459d-9bfe-59c23bcb8ae2" podUID="310d8128-bf82-4d96-b65e-ef841b5fdbff" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.032676 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-combined-ca-bundle\") pod \"openstackclient\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.032758 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t8gw\" (UniqueName: \"kubernetes.io/projected/310d8128-bf82-4d96-b65e-ef841b5fdbff-kube-api-access-7t8gw\") pod \"openstackclient\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.032829 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config\") pod \"openstackclient\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.032867 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config-secret\") pod \"openstackclient\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.033063 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.040227 4861 status_manager.go:875] "Failed to update status for pod" pod="openstack/openstackclient" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"310d8128-bf82-4d96-b65e-ef841b5fdbff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T08:19:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T08:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T08:19:42Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T08:19:42Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/podified-antelope-centos9/openstack-openstackclient@sha256:2b4f8494513a3af102066fec5868ab167ac8664aceb2f0c639d7a0b60260a944\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"openstackclient\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/clouds.yaml\\\",\\\"name\\\":\\\"openstack-config\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/secure.yaml\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/cloudrc\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem\\\",\\\"name\\\":\\\"combined-ca-bundle\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7t8gw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T08:19:42Z\\\"}}\" for pod \"openstack\"/\"openstackclient\": pods \"openstackclient\" not found" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.055778 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.066813 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.076575 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.078014 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: E0129 08:19:42.108562 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle kube-api-access-7t8gw openstack-config openstack-config-secret], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="310d8128-bf82-4d96-b65e-ef841b5fdbff" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.116555 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.121664 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="310d8128-bf82-4d96-b65e-ef841b5fdbff" podUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.139494 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-combined-ca-bundle\") pod \"openstackclient\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.139587 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t8gw\" (UniqueName: \"kubernetes.io/projected/310d8128-bf82-4d96-b65e-ef841b5fdbff-kube-api-access-7t8gw\") pod \"openstackclient\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.139654 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.139707 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config\") pod \"openstackclient\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.139731 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config-secret\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.139781 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config-secret\") pod \"openstackclient\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.139888 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2lcv\" (UniqueName: \"kubernetes.io/projected/84cc4ef3-0b95-4274-90e0-34167e4ca917-kube-api-access-d2lcv\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc 
kubenswrapper[4861]: I0129 08:19:42.139962 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-combined-ca-bundle\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.142470 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config\") pod \"openstackclient\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: E0129 08:19:42.145335 4861 projected.go:194] Error preparing data for projected volume kube-api-access-7t8gw for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (310d8128-bf82-4d96-b65e-ef841b5fdbff) does not match the UID in record. The object might have been deleted and then recreated Jan 29 08:19:42 crc kubenswrapper[4861]: E0129 08:19:42.145396 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/310d8128-bf82-4d96-b65e-ef841b5fdbff-kube-api-access-7t8gw podName:310d8128-bf82-4d96-b65e-ef841b5fdbff nodeName:}" failed. No retries permitted until 2026-01-29 08:19:42.645379291 +0000 UTC m=+6274.316873848 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-7t8gw" (UniqueName: "kubernetes.io/projected/310d8128-bf82-4d96-b65e-ef841b5fdbff-kube-api-access-7t8gw") pod "openstackclient" (UID: "310d8128-bf82-4d96-b65e-ef841b5fdbff") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (310d8128-bf82-4d96-b65e-ef841b5fdbff) does not match the UID in record. The object might have been deleted and then recreated Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.164382 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.166142 4861 util.go:30] "No sandbox for pod can be found. 
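
The nestedpendingoperations error above parks the failed kube-api-access-7t8gw mount and stamps the earliest permitted retry (durationBeforeRetry 500ms). A sketch of that park-and-retry bookkeeping; the 500ms base comes from the log, while the doubling factor and the cap are assumptions for illustration:

package main

import (
	"fmt"
	"time"
)

// Sketch of the schedule behind "No retries permitted until ...
// (durationBeforeRetry 500ms)": each failure pushes the next attempt out by
// a growing delay. Doubling and the cap are assumed values.
func main() {
	const base = 500 * time.Millisecond
	const maxDelay = 2 * time.Minute
	failedAt := time.Date(2026, 1, 29, 8, 19, 42, 145396000, time.UTC)
	delay := base
	for i := 1; i <= 5; i++ {
		fmt.Printf("attempt %d: no retries permitted until %s (durationBeforeRetry %s)\n",
			i, failedAt.Add(delay).Format(time.RFC3339Nano), delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
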
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.166293 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-combined-ca-bundle\") pod \"openstackclient\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.168507 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-lc9n6" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.183717 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config-secret\") pod \"openstackclient\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.203608 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.247293 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.247363 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config-secret\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.247441 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2lcv\" (UniqueName: \"kubernetes.io/projected/84cc4ef3-0b95-4274-90e0-34167e4ca917-kube-api-access-d2lcv\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.247486 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-combined-ca-bundle\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.247569 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xgbh\" (UniqueName: \"kubernetes.io/projected/5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30-kube-api-access-6xgbh\") pod \"kube-state-metrics-0\" (UID: \"5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30\") " pod="openstack/kube-state-metrics-0" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.248326 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.256611 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" 
(UniqueName: \"kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config-secret\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.270761 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-combined-ca-bundle\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.277858 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2lcv\" (UniqueName: \"kubernetes.io/projected/84cc4ef3-0b95-4274-90e0-34167e4ca917-kube-api-access-d2lcv\") pod \"openstackclient\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") " pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.356664 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xgbh\" (UniqueName: \"kubernetes.io/projected/5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30-kube-api-access-6xgbh\") pod \"kube-state-metrics-0\" (UID: \"5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30\") " pod="openstack/kube-state-metrics-0" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.374115 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xgbh\" (UniqueName: \"kubernetes.io/projected/5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30-kube-api-access-6xgbh\") pod \"kube-state-metrics-0\" (UID: \"5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30\") " pod="openstack/kube-state-metrics-0" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.421685 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.482866 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.502053 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="310d8128-bf82-4d96-b65e-ef841b5fdbff" podUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.507965 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.512396 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="310d8128-bf82-4d96-b65e-ef841b5fdbff" podUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.644668 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.662033 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config-secret\") pod \"310d8128-bf82-4d96-b65e-ef841b5fdbff\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.662709 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-combined-ca-bundle\") pod \"310d8128-bf82-4d96-b65e-ef841b5fdbff\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.662805 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config\") pod \"310d8128-bf82-4d96-b65e-ef841b5fdbff\" (UID: \"310d8128-bf82-4d96-b65e-ef841b5fdbff\") " Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.663409 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7t8gw\" (UniqueName: \"kubernetes.io/projected/310d8128-bf82-4d96-b65e-ef841b5fdbff-kube-api-access-7t8gw\") on node \"crc\" DevicePath \"\"" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.663659 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "310d8128-bf82-4d96-b65e-ef841b5fdbff" (UID: "310d8128-bf82-4d96-b65e-ef841b5fdbff"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.668552 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "310d8128-bf82-4d96-b65e-ef841b5fdbff" (UID: "310d8128-bf82-4d96-b65e-ef841b5fdbff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.674794 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "310d8128-bf82-4d96-b65e-ef841b5fdbff" (UID: "310d8128-bf82-4d96-b65e-ef841b5fdbff"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.764937 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.764973 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310d8128-bf82-4d96-b65e-ef841b5fdbff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.764982 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/310d8128-bf82-4d96-b65e-ef841b5fdbff-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.853469 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.855704 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.857328 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.860164 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.860425 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.861067 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-tj4kc" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.861211 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.866393 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.969837 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/e84fca0c-fb5a-450c-8b3a-3d378ff73299-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.969903 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/e84fca0c-fb5a-450c-8b3a-3d378ff73299-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.970006 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/e84fca0c-fb5a-450c-8b3a-3d378ff73299-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0" Jan 29 
08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.970191 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/e84fca0c-fb5a-450c-8b3a-3d378ff73299-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.970349 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/e84fca0c-fb5a-450c-8b3a-3d378ff73299-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.970407 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/e84fca0c-fb5a-450c-8b3a-3d378ff73299-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:42 crc kubenswrapper[4861]: I0129 08:19:42.970620 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lhb9\" (UniqueName: \"kubernetes.io/projected/e84fca0c-fb5a-450c-8b3a-3d378ff73299-kube-api-access-6lhb9\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.072203 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/e84fca0c-fb5a-450c-8b3a-3d378ff73299-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.072307 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lhb9\" (UniqueName: \"kubernetes.io/projected/e84fca0c-fb5a-450c-8b3a-3d378ff73299-kube-api-access-6lhb9\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.072358 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/e84fca0c-fb5a-450c-8b3a-3d378ff73299-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.072417 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/e84fca0c-fb5a-450c-8b3a-3d378ff73299-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.072507 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/e84fca0c-fb5a-450c-8b3a-3d378ff73299-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.072548 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/e84fca0c-fb5a-450c-8b3a-3d378ff73299-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.072584 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/e84fca0c-fb5a-450c-8b3a-3d378ff73299-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.072737 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/e84fca0c-fb5a-450c-8b3a-3d378ff73299-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.080731 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/e84fca0c-fb5a-450c-8b3a-3d378ff73299-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.081898 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/e84fca0c-fb5a-450c-8b3a-3d378ff73299-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.082638 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/e84fca0c-fb5a-450c-8b3a-3d378ff73299-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.091577 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/e84fca0c-fb5a-450c-8b3a-3d378ff73299-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.093500 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/e84fca0c-fb5a-450c-8b3a-3d378ff73299-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.126877 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lhb9\" (UniqueName: \"kubernetes.io/projected/e84fca0c-fb5a-450c-8b3a-3d378ff73299-kube-api-access-6lhb9\") pod \"alertmanager-metric-storage-0\" (UID: \"e84fca0c-fb5a-450c-8b3a-3d378ff73299\") " pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.132471 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="310d8128-bf82-4d96-b65e-ef841b5fdbff" path="/var/lib/kubelet/pods/310d8128-bf82-4d96-b65e-ef841b5fdbff/volumes"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.132964 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.173724 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.272217 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.451921 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.454344 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.461570 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.461824 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.461943 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.462063 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.462222 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.462346 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.462452 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-d65p5"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.470148 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.539143 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.556808 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30","Type":"ContainerStarted","Data":"e8dbd23e592130d9f9e0aa16166488307f71f2c9100a85604fa7cf54d50b4bde"}
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.559197 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.562268 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"84cc4ef3-0b95-4274-90e0-34167e4ca917","Type":"ContainerStarted","Data":"f175cb3639af3ad399a908a8f2b30986230d607ccf30e686207748d048eef517"}
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.562344 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"84cc4ef3-0b95-4274-90e0-34167e4ca917","Type":"ContainerStarted","Data":"47ad50d16c5767ca9955e5bc0c41d246e997db6b1de40cae53632225009af523"}
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.576919 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="310d8128-bf82-4d96-b65e-ef841b5fdbff" podUID="84cc4ef3-0b95-4274-90e0-34167e4ca917"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.585696 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.585747 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.585776 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.585812 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/198e95d7-987d-4bbc-9926-862ec7a2c323-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.585832 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.585882 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-config\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.585944 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.585964 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.585995 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tj62\" (UniqueName: \"kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-kube-api-access-7tj62\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.586015 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.687221 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.687275 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.687315 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tj62\" (UniqueName: \"kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-kube-api-access-7tj62\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.687335 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.687395 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.687436 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.687463 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.687497 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/198e95d7-987d-4bbc-9926-862ec7a2c323-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.687518 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.687576 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-config\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.688791 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.689701 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.689956 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.701279 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-config\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.707540 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/198e95d7-987d-4bbc-9926-862ec7a2c323-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.707679 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.708237 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.708480 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.759514 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tj62\" (UniqueName: \"kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-kube-api-access-7tj62\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.841070 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.841049526 podStartE2EDuration="1.841049526s" podCreationTimestamp="2026-01-29 08:19:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:19:43.624231464 +0000 UTC m=+6275.295726021" watchObservedRunningTime="2026-01-29 08:19:43.841049526 +0000 UTC m=+6275.512544083"
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.841558 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"]
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.842190 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 29 08:19:43 crc kubenswrapper[4861]: I0129 08:19:43.842285 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1714e62516c3654b726b6e648b22d36668061b0808474b9c02a0c5b1095bd45e/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.091914 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\") pod \"prometheus-metric-storage-0\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") " pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.382124 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.591705 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"e84fca0c-fb5a-450c-8b3a-3d378ff73299","Type":"ContainerStarted","Data":"2ba06a86717f2f49b36818baab0e523d089e553481c5b0d9aa3bbf802b675771"}
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.604514 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30","Type":"ContainerStarted","Data":"79ee53fc25068d936388e588a9b0c08c84d421fdd27be05c522e1a9fb076467e"}
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.604656 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.605226 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.614626 4861 generic.go:334] "Generic (PLEG): container finished" podID="9ce50e42-2140-459d-9bfe-59c23bcb8ae2" containerID="a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54" exitCode=137
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.615569 4861 scope.go:117] "RemoveContainer" containerID="a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54"
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.686152 4861 scope.go:117] "RemoveContainer" containerID="a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54"
Jan 29 08:19:44 crc kubenswrapper[4861]: E0129 08:19:44.687605 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54\": container with ID starting with a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54 not found: ID does not exist" containerID="a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54"
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.687634 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54"} err="failed to get container status \"a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54\": rpc error: code = NotFound desc = could not find container \"a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54\": container with ID starting with a58fe7b30cc6e5ee1cca4b7ad206650a73a41a023cabf870dee59631bd546a54 not found: ID does not exist"
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.711159 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgf7d\" (UniqueName: \"kubernetes.io/projected/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-kube-api-access-rgf7d\") pod \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") "
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.711280 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-combined-ca-bundle\") pod \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") "
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.711355 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config-secret\") pod \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") "
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.711587 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config\") pod \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\" (UID: \"9ce50e42-2140-459d-9bfe-59c23bcb8ae2\") "
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.736361 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-kube-api-access-rgf7d" (OuterVolumeSpecName: "kube-api-access-rgf7d") pod "9ce50e42-2140-459d-9bfe-59c23bcb8ae2" (UID: "9ce50e42-2140-459d-9bfe-59c23bcb8ae2"). InnerVolumeSpecName "kube-api-access-rgf7d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.747248 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "9ce50e42-2140-459d-9bfe-59c23bcb8ae2" (UID: "9ce50e42-2140-459d-9bfe-59c23bcb8ae2"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.749752 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9ce50e42-2140-459d-9bfe-59c23bcb8ae2" (UID: "9ce50e42-2140-459d-9bfe-59c23bcb8ae2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.814419 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config\") on node \"crc\" DevicePath \"\""
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.814448 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgf7d\" (UniqueName: \"kubernetes.io/projected/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-kube-api-access-rgf7d\") on node \"crc\" DevicePath \"\""
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.814457 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.827042 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "9ce50e42-2140-459d-9bfe-59c23bcb8ae2" (UID: "9ce50e42-2140-459d-9bfe-59c23bcb8ae2"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.916372 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9ce50e42-2140-459d-9bfe-59c23bcb8ae2-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.992770 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.569026928 podStartE2EDuration="2.992753062s" podCreationTimestamp="2026-01-29 08:19:42 +0000 UTC" firstStartedPulling="2026-01-29 08:19:43.296845554 +0000 UTC m=+6274.968340111" lastFinishedPulling="2026-01-29 08:19:43.720571688 +0000 UTC m=+6275.392066245" observedRunningTime="2026-01-29 08:19:44.637416213 +0000 UTC m=+6276.308910780" watchObservedRunningTime="2026-01-29 08:19:44.992753062 +0000 UTC m=+6276.664247619"
Jan 29 08:19:44 crc kubenswrapper[4861]: I0129 08:19:44.995457 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 29 08:19:45 crc kubenswrapper[4861]: I0129 08:19:45.131218 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ce50e42-2140-459d-9bfe-59c23bcb8ae2" path="/var/lib/kubelet/pods/9ce50e42-2140-459d-9bfe-59c23bcb8ae2/volumes"
Jan 29 08:19:45 crc kubenswrapper[4861]: I0129 08:19:45.631680 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"198e95d7-987d-4bbc-9926-862ec7a2c323","Type":"ContainerStarted","Data":"551728abf8c61c23e3de8349d2dda0e286729d81f7c1a70c5e6944d5fe006ea9"}
Jan 29 08:19:45 crc kubenswrapper[4861]: I0129 08:19:45.636723 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 29 08:19:50 crc kubenswrapper[4861]: I0129 08:19:50.690311 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"198e95d7-987d-4bbc-9926-862ec7a2c323","Type":"ContainerStarted","Data":"cb7932aeff4a54ccffce35ae68ad899e2f1cd376f3288210559d7ff74b250d8c"}
Jan 29 08:19:50 crc kubenswrapper[4861]: I0129 08:19:50.692649 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"e84fca0c-fb5a-450c-8b3a-3d378ff73299","Type":"ContainerStarted","Data":"9e76dc859d3681aba5782e0ed31f17c7d857b50621e58ce146f3537b871482f6"}
Jan 29 08:19:52 crc kubenswrapper[4861]: I0129 08:19:52.652731 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Jan 29 08:19:57 crc kubenswrapper[4861]: I0129 08:19:57.765705 4861 generic.go:334] "Generic (PLEG): container finished" podID="e84fca0c-fb5a-450c-8b3a-3d378ff73299" containerID="9e76dc859d3681aba5782e0ed31f17c7d857b50621e58ce146f3537b871482f6" exitCode=0
Jan 29 08:19:57 crc kubenswrapper[4861]: I0129 08:19:57.765830 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"e84fca0c-fb5a-450c-8b3a-3d378ff73299","Type":"ContainerDied","Data":"9e76dc859d3681aba5782e0ed31f17c7d857b50621e58ce146f3537b871482f6"}
Jan 29 08:19:58 crc kubenswrapper[4861]: I0129 08:19:58.785468 4861 generic.go:334] "Generic (PLEG): container finished" podID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerID="cb7932aeff4a54ccffce35ae68ad899e2f1cd376f3288210559d7ff74b250d8c" exitCode=0
Jan 29 08:19:58 crc kubenswrapper[4861]: I0129 08:19:58.785605 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"198e95d7-987d-4bbc-9926-862ec7a2c323","Type":"ContainerDied","Data":"cb7932aeff4a54ccffce35ae68ad899e2f1cd376f3288210559d7ff74b250d8c"}
Jan 29 08:20:00 crc kubenswrapper[4861]: I0129 08:20:00.813180 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"e84fca0c-fb5a-450c-8b3a-3d378ff73299","Type":"ContainerStarted","Data":"acb3aa1bd0c6cc88533aeb34679925c3572af010b3df5bb093d3f63a3b6abe9b"}
Jan 29 08:20:03 crc kubenswrapper[4861]: I0129 08:20:03.860479 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"e84fca0c-fb5a-450c-8b3a-3d378ff73299","Type":"ContainerStarted","Data":"9aca6145a7cee971ff020869f820afbaeb962ea1309d93a38837d195b03d89ac"}
Jan 29 08:20:03 crc kubenswrapper[4861]: I0129 08:20:03.862621 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:20:03 crc kubenswrapper[4861]: I0129 08:20:03.865778 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0"
Jan 29 08:20:03 crc kubenswrapper[4861]: I0129 08:20:03.897877 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=5.849656579 podStartE2EDuration="21.897851918s" podCreationTimestamp="2026-01-29 08:19:42 +0000 UTC" firstStartedPulling="2026-01-29 08:19:43.877637619 +0000 UTC m=+6275.549132176" lastFinishedPulling="2026-01-29 08:19:59.925832958 +0000 UTC m=+6291.597327515" observedRunningTime="2026-01-29 08:20:03.886605692 +0000 UTC m=+6295.558100259" watchObservedRunningTime="2026-01-29 08:20:03.897851918 +0000 UTC m=+6295.569346485"
Jan 29 08:20:04 crc kubenswrapper[4861]: I0129 08:20:04.874631 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"198e95d7-987d-4bbc-9926-862ec7a2c323","Type":"ContainerStarted","Data":"a54ff3d85389f33761c566fbb9c19bc367400e78dda3676e9a96f292b05be01c"}
Jan 29 08:20:07 crc kubenswrapper[4861]: I0129 08:20:07.054049 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-zt52c"]
Jan 29 08:20:07 crc kubenswrapper[4861]: I0129 08:20:07.070029 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-9082-account-create-update-82jff"]
Jan 29 08:20:07 crc kubenswrapper[4861]: I0129 08:20:07.080054 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-zt52c"]
Jan 29 08:20:07 crc kubenswrapper[4861]: I0129 08:20:07.089817 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-9082-account-create-update-82jff"]
Jan 29 08:20:07 crc kubenswrapper[4861]: I0129 08:20:07.140934 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36d0e0cc-132a-48ad-83cc-15c5ffda48e1" path="/var/lib/kubelet/pods/36d0e0cc-132a-48ad-83cc-15c5ffda48e1/volumes"
Jan 29 08:20:07 crc kubenswrapper[4861]: I0129 08:20:07.145752 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9c11f12-732d-470e-b8fa-29cbf4d977fb" path="/var/lib/kubelet/pods/b9c11f12-732d-470e-b8fa-29cbf4d977fb/volumes"
Jan 29 08:20:09 crc kubenswrapper[4861]: I0129 08:20:09.943644 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"198e95d7-987d-4bbc-9926-862ec7a2c323","Type":"ContainerStarted","Data":"95597ecf432242c97469a3a1736d0957ea1b7ec4b9d8e423cb99372dd9253463"}
Jan 29 08:20:12 crc kubenswrapper[4861]: I0129 08:20:12.987275 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"198e95d7-987d-4bbc-9926-862ec7a2c323","Type":"ContainerStarted","Data":"8172306d9ddca14f009c6d71ac80763be783dbe22f1c1b50d2195005cac8ce9c"}
Jan 29 08:20:13 crc kubenswrapper[4861]: I0129 08:20:13.034057 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=3.7150613249999997 podStartE2EDuration="31.03402521s" podCreationTimestamp="2026-01-29 08:19:42 +0000 UTC" firstStartedPulling="2026-01-29 08:19:44.99991052 +0000 UTC m=+6276.671405077" lastFinishedPulling="2026-01-29 08:20:12.318874405 +0000 UTC m=+6303.990368962" observedRunningTime="2026-01-29 08:20:13.020389781 +0000 UTC m=+6304.691884408" watchObservedRunningTime="2026-01-29 08:20:13.03402521 +0000 UTC m=+6304.705519767"
Jan 29 08:20:14 crc kubenswrapper[4861]: I0129 08:20:14.383767 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:20:14 crc kubenswrapper[4861]: I0129 08:20:14.384123 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:20:14 crc kubenswrapper[4861]: I0129 08:20:14.387488 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:20:15 crc kubenswrapper[4861]: I0129 08:20:15.020207 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:20:15 crc kubenswrapper[4861]: I0129 08:20:15.029943 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-rnh8s"]
Jan 29 08:20:15 crc kubenswrapper[4861]: I0129 08:20:15.038897 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-rnh8s"]
Jan 29 08:20:15 crc kubenswrapper[4861]: I0129 08:20:15.131011 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="215d3063-4d55-431e-a474-612c0ab49a24" path="/var/lib/kubelet/pods/215d3063-4d55-431e-a474-612c0ab49a24/volumes"
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.814357 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"]
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.815136 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" containerName="openstackclient" containerID="cri-o://f175cb3639af3ad399a908a8f2b30986230d607ccf30e686207748d048eef517" gracePeriod=2
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.823694 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"]
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.844711 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Jan 29 08:20:16 crc kubenswrapper[4861]: E0129 08:20:16.845143 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" containerName="openstackclient"
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.845158 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" containerName="openstackclient"
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.845357 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" containerName="openstackclient"
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.846012 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.849226 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" podUID="1476fc3b-74de-4991-8c45-b400d2f410f6"
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.866307 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.999183 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1476fc3b-74de-4991-8c45-b400d2f410f6-openstack-config\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.999273 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1476fc3b-74de-4991-8c45-b400d2f410f6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.999352 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1476fc3b-74de-4991-8c45-b400d2f410f6-openstack-config-secret\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:16 crc kubenswrapper[4861]: I0129 08:20:16.999404 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjr2x\" (UniqueName: \"kubernetes.io/projected/1476fc3b-74de-4991-8c45-b400d2f410f6-kube-api-access-sjr2x\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.102058 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1476fc3b-74de-4991-8c45-b400d2f410f6-openstack-config\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.102168 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1476fc3b-74de-4991-8c45-b400d2f410f6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.102269 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1476fc3b-74de-4991-8c45-b400d2f410f6-openstack-config-secret\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.102316 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjr2x\" (UniqueName: \"kubernetes.io/projected/1476fc3b-74de-4991-8c45-b400d2f410f6-kube-api-access-sjr2x\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.103709 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/1476fc3b-74de-4991-8c45-b400d2f410f6-openstack-config\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.108349 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1476fc3b-74de-4991-8c45-b400d2f410f6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.115287 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/1476fc3b-74de-4991-8c45-b400d2f410f6-openstack-config-secret\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.134796 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjr2x\" (UniqueName: \"kubernetes.io/projected/1476fc3b-74de-4991-8c45-b400d2f410f6-kube-api-access-sjr2x\") pod \"openstackclient\" (UID: \"1476fc3b-74de-4991-8c45-b400d2f410f6\") " pod="openstack/openstackclient"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.172145 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.780358 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.801823 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.801933 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.803987 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.804848 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.899233 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.919117 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.919334 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-scripts\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.919555 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.919617 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-run-httpd\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.919737 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgdj8\" (UniqueName: \"kubernetes.io/projected/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-kube-api-access-fgdj8\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.919830 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-config-data\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:17 crc kubenswrapper[4861]: I0129 08:20:17.919923 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-log-httpd\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.021221 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-log-httpd\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.021615 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-log-httpd\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.021714 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.021829 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-scripts\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.021982 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.022085 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-run-httpd\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.022203 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgdj8\" (UniqueName: \"kubernetes.io/projected/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-kube-api-access-fgdj8\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.022327 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-config-data\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.022417 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-run-httpd\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.025187 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.025255 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-scripts\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.026654 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.026890 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-config-data\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.039256 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgdj8\" (UniqueName: \"kubernetes.io/projected/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-kube-api-access-fgdj8\") pod \"ceilometer-0\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") " pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.047888 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"1476fc3b-74de-4991-8c45-b400d2f410f6","Type":"ContainerStarted","Data":"25b02174771876521b6917d63d475bada6d8f7e732181b3e2a8c2f61736a95c8"}
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.133441 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.219834 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.220094 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="prometheus" containerID="cri-o://a54ff3d85389f33761c566fbb9c19bc367400e78dda3676e9a96f292b05be01c" gracePeriod=600
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.220181 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="thanos-sidecar" containerID="cri-o://8172306d9ddca14f009c6d71ac80763be783dbe22f1c1b50d2195005cac8ce9c" gracePeriod=600
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.220200 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="config-reloader" containerID="cri-o://95597ecf432242c97469a3a1736d0957ea1b7ec4b9d8e423cb99372dd9253463" gracePeriod=600
Jan 29 08:20:18 crc kubenswrapper[4861]: I0129 08:20:18.757789 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 08:20:18 crc kubenswrapper[4861]: W0129 08:20:18.761532 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5d7dd78_a16b_43a4_9e07_e943cfe55a5b.slice/crio-f81bbf41c3fe930a0f59561b4529225d91ffc76845f1aa0a47a76e35144609fd WatchSource:0}: Error finding container f81bbf41c3fe930a0f59561b4529225d91ffc76845f1aa0a47a76e35144609fd: Status 404 returned error can't find the container with id f81bbf41c3fe930a0f59561b4529225d91ffc76845f1aa0a47a76e35144609fd
Jan 29 08:20:19 crc kubenswrapper[4861]: I0129 08:20:19.057792 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b","Type":"ContainerStarted","Data":"f81bbf41c3fe930a0f59561b4529225d91ffc76845f1aa0a47a76e35144609fd"}
Jan 29 08:20:19 crc kubenswrapper[4861]: I0129 08:20:19.059629 4861 generic.go:334] "Generic (PLEG): container finished" podID="84cc4ef3-0b95-4274-90e0-34167e4ca917" containerID="f175cb3639af3ad399a908a8f2b30986230d607ccf30e686207748d048eef517" exitCode=137
Jan 29 08:20:19 crc kubenswrapper[4861]: I0129 08:20:19.062308 4861 generic.go:334] "Generic (PLEG): container finished" podID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerID="8172306d9ddca14f009c6d71ac80763be783dbe22f1c1b50d2195005cac8ce9c" exitCode=0
Jan 29 08:20:19 crc kubenswrapper[4861]: I0129 08:20:19.062330 4861 generic.go:334] "Generic (PLEG): container finished" podID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerID="95597ecf432242c97469a3a1736d0957ea1b7ec4b9d8e423cb99372dd9253463" exitCode=0
Jan 29 08:20:19 crc kubenswrapper[4861]: I0129 08:20:19.062339 4861 generic.go:334] "Generic (PLEG): container finished" podID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerID="a54ff3d85389f33761c566fbb9c19bc367400e78dda3676e9a96f292b05be01c" exitCode=0
Jan 29 08:20:19 crc kubenswrapper[4861]: I0129 08:20:19.062363 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"198e95d7-987d-4bbc-9926-862ec7a2c323","Type":"ContainerDied","Data":"8172306d9ddca14f009c6d71ac80763be783dbe22f1c1b50d2195005cac8ce9c"}
Jan 29 08:20:19 crc kubenswrapper[4861]: I0129 08:20:19.062385 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"198e95d7-987d-4bbc-9926-862ec7a2c323","Type":"ContainerDied","Data":"95597ecf432242c97469a3a1736d0957ea1b7ec4b9d8e423cb99372dd9253463"}
Jan 29 08:20:19 crc kubenswrapper[4861]: I0129 08:20:19.062418 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"198e95d7-987d-4bbc-9926-862ec7a2c323","Type":"ContainerDied","Data":"a54ff3d85389f33761c566fbb9c19bc367400e78dda3676e9a96f292b05be01c"}
Jan 29 08:20:19 crc kubenswrapper[4861]: I0129 08:20:19.383833 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.1.152:9090/-/ready\": dial tcp 10.217.1.152:9090: connect: connection refused"
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.073855 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"1476fc3b-74de-4991-8c45-b400d2f410f6","Type":"ContainerStarted","Data":"100cae57dd3cbaef1cec62362887f0656beef65153a6c810530803e4912a8151"}
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.761712 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.765739 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" podUID="1476fc3b-74de-4991-8c45-b400d2f410f6"
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.784484 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.878664 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-combined-ca-bundle\") pod \"84cc4ef3-0b95-4274-90e0-34167e4ca917\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.878830 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\") pod \"198e95d7-987d-4bbc-9926-862ec7a2c323\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.878856 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config-secret\") pod \"84cc4ef3-0b95-4274-90e0-34167e4ca917\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.878914 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-2\") pod \"198e95d7-987d-4bbc-9926-862ec7a2c323\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.878962 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/198e95d7-987d-4bbc-9926-862ec7a2c323-config-out\") pod \"198e95d7-987d-4bbc-9926-862ec7a2c323\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.878980 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-thanos-prometheus-http-client-file\") pod \"198e95d7-987d-4bbc-9926-862ec7a2c323\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.879029 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config\") pod \"84cc4ef3-0b95-4274-90e0-34167e4ca917\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.879051 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-web-config\") pod \"198e95d7-987d-4bbc-9926-862ec7a2c323\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.879089 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-config\") pod \"198e95d7-987d-4bbc-9926-862ec7a2c323\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.879166 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tj62\" (UniqueName: \"kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-kube-api-access-7tj62\") pod \"198e95d7-987d-4bbc-9926-862ec7a2c323\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.879211 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-1\") pod \"198e95d7-987d-4bbc-9926-862ec7a2c323\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.879265 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-0\") pod \"198e95d7-987d-4bbc-9926-862ec7a2c323\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.879305 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2lcv\" (UniqueName: \"kubernetes.io/projected/84cc4ef3-0b95-4274-90e0-34167e4ca917-kube-api-access-d2lcv\") pod \"84cc4ef3-0b95-4274-90e0-34167e4ca917\" (UID: \"84cc4ef3-0b95-4274-90e0-34167e4ca917\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.879347 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-tls-assets\") pod \"198e95d7-987d-4bbc-9926-862ec7a2c323\" (UID: \"198e95d7-987d-4bbc-9926-862ec7a2c323\") "
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.880449 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "198e95d7-987d-4bbc-9926-862ec7a2c323" (UID: "198e95d7-987d-4bbc-9926-862ec7a2c323"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.880669 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "198e95d7-987d-4bbc-9926-862ec7a2c323" (UID: "198e95d7-987d-4bbc-9926-862ec7a2c323"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.883261 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "198e95d7-987d-4bbc-9926-862ec7a2c323" (UID: "198e95d7-987d-4bbc-9926-862ec7a2c323"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.887394 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "198e95d7-987d-4bbc-9926-862ec7a2c323" (UID: "198e95d7-987d-4bbc-9926-862ec7a2c323"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.887475 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "198e95d7-987d-4bbc-9926-862ec7a2c323" (UID: "198e95d7-987d-4bbc-9926-862ec7a2c323"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.888184 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-config" (OuterVolumeSpecName: "config") pod "198e95d7-987d-4bbc-9926-862ec7a2c323" (UID: "198e95d7-987d-4bbc-9926-862ec7a2c323"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.888227 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/198e95d7-987d-4bbc-9926-862ec7a2c323-config-out" (OuterVolumeSpecName: "config-out") pod "198e95d7-987d-4bbc-9926-862ec7a2c323" (UID: "198e95d7-987d-4bbc-9926-862ec7a2c323"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.902539 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-kube-api-access-7tj62" (OuterVolumeSpecName: "kube-api-access-7tj62") pod "198e95d7-987d-4bbc-9926-862ec7a2c323" (UID: "198e95d7-987d-4bbc-9926-862ec7a2c323"). InnerVolumeSpecName "kube-api-access-7tj62". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.908233 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84cc4ef3-0b95-4274-90e0-34167e4ca917-kube-api-access-d2lcv" (OuterVolumeSpecName: "kube-api-access-d2lcv") pod "84cc4ef3-0b95-4274-90e0-34167e4ca917" (UID: "84cc4ef3-0b95-4274-90e0-34167e4ca917"). InnerVolumeSpecName "kube-api-access-d2lcv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.928896 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "198e95d7-987d-4bbc-9926-862ec7a2c323" (UID: "198e95d7-987d-4bbc-9926-862ec7a2c323"). InnerVolumeSpecName "pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3".
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.940656 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "84cc4ef3-0b95-4274-90e0-34167e4ca917" (UID: "84cc4ef3-0b95-4274-90e0-34167e4ca917"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.955694 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "84cc4ef3-0b95-4274-90e0-34167e4ca917" (UID: "84cc4ef3-0b95-4274-90e0-34167e4ca917"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.959501 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-web-config" (OuterVolumeSpecName: "web-config") pod "198e95d7-987d-4bbc-9926-862ec7a2c323" (UID: "198e95d7-987d-4bbc-9926-862ec7a2c323"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.961277 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "84cc4ef3-0b95-4274-90e0-34167e4ca917" (UID: "84cc4ef3-0b95-4274-90e0-34167e4ca917"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981515 4861 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981553 4861 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/198e95d7-987d-4bbc-9926-862ec7a2c323-config-out\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981565 4861 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981578 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981589 4861 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-web-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981599 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/198e95d7-987d-4bbc-9926-862ec7a2c323-config\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981608 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tj62\" (UniqueName: \"kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-kube-api-access-7tj62\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981617 4861 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981626 4861 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/198e95d7-987d-4bbc-9926-862ec7a2c323-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981635 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2lcv\" (UniqueName: \"kubernetes.io/projected/84cc4ef3-0b95-4274-90e0-34167e4ca917-kube-api-access-d2lcv\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981644 4861 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/198e95d7-987d-4bbc-9926-862ec7a2c323-tls-assets\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981652 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981685 4861 reconciler_common.go:286] 
"operationExecutor.UnmountDevice started for volume \"pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\") on node \"crc\" " Jan 29 08:20:20 crc kubenswrapper[4861]: I0129 08:20:20.981697 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/84cc4ef3-0b95-4274-90e0-34167e4ca917-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.021333 4861 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.021498 4861 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3") on node "crc" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.083554 4861 reconciler_common.go:293] "Volume detached for volume \"pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\") on node \"crc\" DevicePath \"\"" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.084982 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"198e95d7-987d-4bbc-9926-862ec7a2c323","Type":"ContainerDied","Data":"551728abf8c61c23e3de8349d2dda0e286729d81f7c1a70c5e6944d5fe006ea9"} Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.085030 4861 scope.go:117] "RemoveContainer" containerID="8172306d9ddca14f009c6d71ac80763be783dbe22f1c1b50d2195005cac8ce9c" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.085027 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.086852 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b","Type":"ContainerStarted","Data":"bfd7f1dd95a92935d4193a7580f4cee19713397945f49600a3147ef043cc4405"} Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.091616 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.105583 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" podUID="1476fc3b-74de-4991-8c45-b400d2f410f6" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.107965 4861 scope.go:117] "RemoveContainer" containerID="95597ecf432242c97469a3a1736d0957ea1b7ec4b9d8e423cb99372dd9253463" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.128747 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=5.128727114 podStartE2EDuration="5.128727114s" podCreationTimestamp="2026-01-29 08:20:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:20:21.127423129 +0000 UTC m=+6312.798917696" watchObservedRunningTime="2026-01-29 08:20:21.128727114 +0000 UTC m=+6312.800221661" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.134454 4861 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" podUID="1476fc3b-74de-4991-8c45-b400d2f410f6" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.137288 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84cc4ef3-0b95-4274-90e0-34167e4ca917" path="/var/lib/kubelet/pods/84cc4ef3-0b95-4274-90e0-34167e4ca917/volumes" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.139064 4861 scope.go:117] "RemoveContainer" containerID="a54ff3d85389f33761c566fbb9c19bc367400e78dda3676e9a96f292b05be01c" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.171126 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.179234 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.188841 4861 scope.go:117] "RemoveContainer" containerID="cb7932aeff4a54ccffce35ae68ad899e2f1cd376f3288210559d7ff74b250d8c" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.207783 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 29 08:20:21 crc kubenswrapper[4861]: E0129 08:20:21.208212 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="init-config-reloader" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.208230 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="init-config-reloader" Jan 29 08:20:21 crc kubenswrapper[4861]: E0129 08:20:21.208242 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="prometheus" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.208248 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="prometheus" Jan 29 08:20:21 crc kubenswrapper[4861]: E0129 08:20:21.208266 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="thanos-sidecar" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.208272 4861 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="thanos-sidecar" Jan 29 08:20:21 crc kubenswrapper[4861]: E0129 08:20:21.208293 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="config-reloader" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.208299 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="config-reloader" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.208481 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="config-reloader" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.208507 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="prometheus" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.208521 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" containerName="thanos-sidecar" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.210468 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.213179 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.213399 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.213515 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.213620 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.213968 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-d65p5" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.214213 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.219600 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.220611 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.227831 4861 scope.go:117] "RemoveContainer" containerID="f175cb3639af3ad399a908a8f2b30986230d607ccf30e686207748d048eef517" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.237040 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.254048 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287613 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/15b58b86-fb90-42d4-8818-774def6f7b1c-tls-assets\") pod 
\"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287666 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287707 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287724 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvz8m\" (UniqueName: \"kubernetes.io/projected/15b58b86-fb90-42d4-8818-774def6f7b1c-kube-api-access-dvz8m\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287753 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/15b58b86-fb90-42d4-8818-774def6f7b1c-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287771 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-config\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287818 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287874 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287893 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/15b58b86-fb90-42d4-8818-774def6f7b1c-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " 
pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287916 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/15b58b86-fb90-42d4-8818-774def6f7b1c-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287944 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287965 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/15b58b86-fb90-42d4-8818-774def6f7b1c-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.287987 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394112 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394176 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvz8m\" (UniqueName: \"kubernetes.io/projected/15b58b86-fb90-42d4-8818-774def6f7b1c-kube-api-access-dvz8m\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394232 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/15b58b86-fb90-42d4-8818-774def6f7b1c-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394263 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-config\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 
08:20:21.394375 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394499 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394524 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/15b58b86-fb90-42d4-8818-774def6f7b1c-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394566 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/15b58b86-fb90-42d4-8818-774def6f7b1c-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394624 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394734 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/15b58b86-fb90-42d4-8818-774def6f7b1c-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394773 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394904 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/15b58b86-fb90-42d4-8818-774def6f7b1c-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.394948 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.399350 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/15b58b86-fb90-42d4-8818-774def6f7b1c-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.399450 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/15b58b86-fb90-42d4-8818-774def6f7b1c-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.401110 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/15b58b86-fb90-42d4-8818-774def6f7b1c-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.404005 4861 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.404154 4861 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1714e62516c3654b726b6e648b22d36668061b0808474b9c02a0c5b1095bd45e/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.405101 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-config\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.405638 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/15b58b86-fb90-42d4-8818-774def6f7b1c-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.405983 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/15b58b86-fb90-42d4-8818-774def6f7b1c-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.409505 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.409593 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.411747 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.412024 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.413565 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15b58b86-fb90-42d4-8818-774def6f7b1c-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.434774 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvz8m\" (UniqueName: \"kubernetes.io/projected/15b58b86-fb90-42d4-8818-774def6f7b1c-kube-api-access-dvz8m\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.453598 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-39398b21-9d6d-4305-b721-0dd4af7b67c3\") pod \"prometheus-metric-storage-0\" (UID: \"15b58b86-fb90-42d4-8818-774def6f7b1c\") " pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:21 crc kubenswrapper[4861]: I0129 08:20:21.536788 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 29 08:20:22 crc kubenswrapper[4861]: I0129 08:20:22.072492 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 29 08:20:22 crc kubenswrapper[4861]: W0129 08:20:22.077329 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod15b58b86_fb90_42d4_8818_774def6f7b1c.slice/crio-455a57de8d1847b2ba055e44a8861c7553b05cf606877275dbb2e40762f2904d WatchSource:0}: Error finding container 455a57de8d1847b2ba055e44a8861c7553b05cf606877275dbb2e40762f2904d: Status 404 returned error can't find the container with id 455a57de8d1847b2ba055e44a8861c7553b05cf606877275dbb2e40762f2904d Jan 29 08:20:22 crc kubenswrapper[4861]: I0129 08:20:22.104171 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b","Type":"ContainerStarted","Data":"16d1f78b65bb96f8bfc3043acff0a3cbe3f8e3201c22a9c500a3466d98683326"} Jan 29 08:20:22 crc kubenswrapper[4861]: I0129 08:20:22.108060 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"15b58b86-fb90-42d4-8818-774def6f7b1c","Type":"ContainerStarted","Data":"455a57de8d1847b2ba055e44a8861c7553b05cf606877275dbb2e40762f2904d"} Jan 29 08:20:23 crc kubenswrapper[4861]: I0129 08:20:23.132447 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="198e95d7-987d-4bbc-9926-862ec7a2c323" path="/var/lib/kubelet/pods/198e95d7-987d-4bbc-9926-862ec7a2c323/volumes" Jan 29 08:20:23 crc kubenswrapper[4861]: I0129 08:20:23.135797 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b","Type":"ContainerStarted","Data":"f3230a98fae560b0683307ccbfe2462bf7f0d5c64771c336d8439cd2812b8319"} Jan 29 08:20:25 crc kubenswrapper[4861]: I0129 08:20:25.150543 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b","Type":"ContainerStarted","Data":"813ffc1389cbe4e8b254f3294b8768fc32fee7e2ea1518eea345825ed00d8386"} Jan 29 08:20:25 crc kubenswrapper[4861]: I0129 08:20:25.151013 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 08:20:26 crc kubenswrapper[4861]: I0129 08:20:26.162395 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"15b58b86-fb90-42d4-8818-774def6f7b1c","Type":"ContainerStarted","Data":"70e6777bdfcfd6021f270220d8f92cfcce175b8d8739ebdf98f11c2b14fcb07c"} Jan 29 08:20:26 crc kubenswrapper[4861]: I0129 08:20:26.191193 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.936365953 podStartE2EDuration="9.191172982s" podCreationTimestamp="2026-01-29 08:20:17 +0000 UTC" firstStartedPulling="2026-01-29 08:20:18.763764244 +0000 UTC m=+6310.435258801" lastFinishedPulling="2026-01-29 08:20:24.018571273 +0000 UTC m=+6315.690065830" observedRunningTime="2026-01-29 08:20:25.182134385 +0000 UTC m=+6316.853628972" watchObservedRunningTime="2026-01-29 08:20:26.191172982 +0000 UTC m=+6317.862667539" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.641820 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-bfv2w"] Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.643602 4861 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-bfv2w" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.655308 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-bfv2w"] Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.738454 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-operator-scripts\") pod \"aodh-db-create-bfv2w\" (UID: \"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b\") " pod="openstack/aodh-db-create-bfv2w" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.738579 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2mnb\" (UniqueName: \"kubernetes.io/projected/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-kube-api-access-h2mnb\") pod \"aodh-db-create-bfv2w\" (UID: \"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b\") " pod="openstack/aodh-db-create-bfv2w" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.750843 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-8ed8-account-create-update-qc2hx"] Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.752454 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-8ed8-account-create-update-qc2hx" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.762816 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-8ed8-account-create-update-qc2hx"] Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.798623 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.841834 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnf2w\" (UniqueName: \"kubernetes.io/projected/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-kube-api-access-pnf2w\") pod \"aodh-8ed8-account-create-update-qc2hx\" (UID: \"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b\") " pod="openstack/aodh-8ed8-account-create-update-qc2hx" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.842209 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-operator-scripts\") pod \"aodh-db-create-bfv2w\" (UID: \"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b\") " pod="openstack/aodh-db-create-bfv2w" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.842252 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-operator-scripts\") pod \"aodh-8ed8-account-create-update-qc2hx\" (UID: \"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b\") " pod="openstack/aodh-8ed8-account-create-update-qc2hx" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.842330 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2mnb\" (UniqueName: \"kubernetes.io/projected/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-kube-api-access-h2mnb\") pod \"aodh-db-create-bfv2w\" (UID: \"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b\") " pod="openstack/aodh-db-create-bfv2w" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.844631 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-operator-scripts\") pod \"aodh-db-create-bfv2w\" (UID: \"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b\") " pod="openstack/aodh-db-create-bfv2w" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.883426 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2mnb\" (UniqueName: \"kubernetes.io/projected/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-kube-api-access-h2mnb\") pod \"aodh-db-create-bfv2w\" (UID: \"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b\") " pod="openstack/aodh-db-create-bfv2w" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.944338 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnf2w\" (UniqueName: \"kubernetes.io/projected/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-kube-api-access-pnf2w\") pod \"aodh-8ed8-account-create-update-qc2hx\" (UID: \"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b\") " pod="openstack/aodh-8ed8-account-create-update-qc2hx" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.944473 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-operator-scripts\") pod \"aodh-8ed8-account-create-update-qc2hx\" (UID: \"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b\") " pod="openstack/aodh-8ed8-account-create-update-qc2hx" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.945221 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-operator-scripts\") pod \"aodh-8ed8-account-create-update-qc2hx\" (UID: \"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b\") " pod="openstack/aodh-8ed8-account-create-update-qc2hx" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.960544 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnf2w\" (UniqueName: \"kubernetes.io/projected/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-kube-api-access-pnf2w\") pod \"aodh-8ed8-account-create-update-qc2hx\" (UID: \"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b\") " pod="openstack/aodh-8ed8-account-create-update-qc2hx" Jan 29 08:20:27 crc kubenswrapper[4861]: I0129 08:20:27.963586 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-bfv2w" Jan 29 08:20:28 crc kubenswrapper[4861]: I0129 08:20:28.132786 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-8ed8-account-create-update-qc2hx" Jan 29 08:20:28 crc kubenswrapper[4861]: I0129 08:20:28.476509 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-bfv2w"] Jan 29 08:20:28 crc kubenswrapper[4861]: I0129 08:20:28.627784 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-8ed8-account-create-update-qc2hx"] Jan 29 08:20:28 crc kubenswrapper[4861]: W0129 08:20:28.635655 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28f80e2d_ad83_4ee1_b4f6_6de5197c4b0b.slice/crio-974312e5ee90eba2c41c148f8049c17b3eec633a71b4bdb5b7d7c9d6a9224d18 WatchSource:0}: Error finding container 974312e5ee90eba2c41c148f8049c17b3eec633a71b4bdb5b7d7c9d6a9224d18: Status 404 returned error can't find the container with id 974312e5ee90eba2c41c148f8049c17b3eec633a71b4bdb5b7d7c9d6a9224d18 Jan 29 08:20:29 crc kubenswrapper[4861]: I0129 08:20:29.201779 4861 generic.go:334] "Generic (PLEG): container finished" podID="28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b" containerID="d5efa4905b44535bdb9819b6076d819575eb23a73b969bb50c940bd3147ff0a2" exitCode=0 Jan 29 08:20:29 crc kubenswrapper[4861]: I0129 08:20:29.201839 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-8ed8-account-create-update-qc2hx" event={"ID":"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b","Type":"ContainerDied","Data":"d5efa4905b44535bdb9819b6076d819575eb23a73b969bb50c940bd3147ff0a2"} Jan 29 08:20:29 crc kubenswrapper[4861]: I0129 08:20:29.201869 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-8ed8-account-create-update-qc2hx" event={"ID":"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b","Type":"ContainerStarted","Data":"974312e5ee90eba2c41c148f8049c17b3eec633a71b4bdb5b7d7c9d6a9224d18"} Jan 29 08:20:29 crc kubenswrapper[4861]: I0129 08:20:29.203931 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b" containerID="c2fa11e106c7ee126d3c44aecbe3c2d32eaeb83ee7237ec177e1dc7ce5b164a5" exitCode=0 Jan 29 08:20:29 crc kubenswrapper[4861]: I0129 08:20:29.203975 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-bfv2w" event={"ID":"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b","Type":"ContainerDied","Data":"c2fa11e106c7ee126d3c44aecbe3c2d32eaeb83ee7237ec177e1dc7ce5b164a5"} Jan 29 08:20:29 crc kubenswrapper[4861]: I0129 08:20:29.204000 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-bfv2w" event={"ID":"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b","Type":"ContainerStarted","Data":"b58aec93f976e0212b743d0f8b7681b5ef581d0c82140dfca8ca9284844a1281"} Jan 29 08:20:29 crc kubenswrapper[4861]: I0129 08:20:29.608970 4861 scope.go:117] "RemoveContainer" containerID="24e63608206df2b802c8a67ec96560fd65d464a572c46d304fa7aa32556811d1" Jan 29 08:20:29 crc kubenswrapper[4861]: I0129 08:20:29.641058 4861 scope.go:117] "RemoveContainer" containerID="c52e6c2544058ee6c149c5574f3bd6f297f8a376a4a929bddd0eb95ffe91fd29" Jan 29 08:20:29 crc kubenswrapper[4861]: I0129 08:20:29.696900 4861 scope.go:117] "RemoveContainer" containerID="441c2ee0f6d03a6d2320f4d04b8cccd02ea0e54d69bda0b008f651022bf5bf66" Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.630148 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.630476 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.715965 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-bfv2w"
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.724392 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-8ed8-account-create-update-qc2hx"
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.812014 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-operator-scripts\") pod \"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b\" (UID: \"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b\") "
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.812201 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnf2w\" (UniqueName: \"kubernetes.io/projected/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-kube-api-access-pnf2w\") pod \"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b\" (UID: \"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b\") "
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.812278 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2mnb\" (UniqueName: \"kubernetes.io/projected/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-kube-api-access-h2mnb\") pod \"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b\" (UID: \"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b\") "
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.812418 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-operator-scripts\") pod \"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b\" (UID: \"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b\") "
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.813019 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b" (UID: "5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.813229 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b" (UID: "28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.818606 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-kube-api-access-pnf2w" (OuterVolumeSpecName: "kube-api-access-pnf2w") pod "28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b" (UID: "28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b"). InnerVolumeSpecName "kube-api-access-pnf2w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.819031 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-kube-api-access-h2mnb" (OuterVolumeSpecName: "kube-api-access-h2mnb") pod "5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b" (UID: "5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b"). InnerVolumeSpecName "kube-api-access-h2mnb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.915484 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.915518 4861 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.915529 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnf2w\" (UniqueName: \"kubernetes.io/projected/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b-kube-api-access-pnf2w\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:30 crc kubenswrapper[4861]: I0129 08:20:30.915538 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2mnb\" (UniqueName: \"kubernetes.io/projected/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b-kube-api-access-h2mnb\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:31 crc kubenswrapper[4861]: I0129 08:20:31.235644 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-8ed8-account-create-update-qc2hx"
Jan 29 08:20:31 crc kubenswrapper[4861]: I0129 08:20:31.235641 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-8ed8-account-create-update-qc2hx" event={"ID":"28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b","Type":"ContainerDied","Data":"974312e5ee90eba2c41c148f8049c17b3eec633a71b4bdb5b7d7c9d6a9224d18"}
Jan 29 08:20:31 crc kubenswrapper[4861]: I0129 08:20:31.235726 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="974312e5ee90eba2c41c148f8049c17b3eec633a71b4bdb5b7d7c9d6a9224d18"
Jan 29 08:20:31 crc kubenswrapper[4861]: I0129 08:20:31.238206 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-bfv2w" event={"ID":"5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b","Type":"ContainerDied","Data":"b58aec93f976e0212b743d0f8b7681b5ef581d0c82140dfca8ca9284844a1281"}
Jan 29 08:20:31 crc kubenswrapper[4861]: I0129 08:20:31.238258 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b58aec93f976e0212b743d0f8b7681b5ef581d0c82140dfca8ca9284844a1281"
Jan 29 08:20:31 crc kubenswrapper[4861]: I0129 08:20:31.238302 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-bfv2w"
Jan 29 08:20:32 crc kubenswrapper[4861]: I0129 08:20:32.291334 4861 generic.go:334] "Generic (PLEG): container finished" podID="15b58b86-fb90-42d4-8818-774def6f7b1c" containerID="70e6777bdfcfd6021f270220d8f92cfcce175b8d8739ebdf98f11c2b14fcb07c" exitCode=0
Jan 29 08:20:32 crc kubenswrapper[4861]: I0129 08:20:32.291659 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"15b58b86-fb90-42d4-8818-774def6f7b1c","Type":"ContainerDied","Data":"70e6777bdfcfd6021f270220d8f92cfcce175b8d8739ebdf98f11c2b14fcb07c"}
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.071797 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-4mhs8"]
Jan 29 08:20:33 crc kubenswrapper[4861]: E0129 08:20:33.072533 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b" containerName="mariadb-account-create-update"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.072550 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b" containerName="mariadb-account-create-update"
Jan 29 08:20:33 crc kubenswrapper[4861]: E0129 08:20:33.072561 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b" containerName="mariadb-database-create"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.072567 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b" containerName="mariadb-database-create"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.072757 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b" containerName="mariadb-database-create"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.072775 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b" containerName="mariadb-account-create-update"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.073471 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.076813 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.076819 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.077052 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-jnvc4"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.077434 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.087861 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-4mhs8"]
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.176352 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-scripts\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.176431 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8khw7\" (UniqueName: \"kubernetes.io/projected/40eac315-cbc0-4b34-b276-bb81c3df7afe-kube-api-access-8khw7\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.178343 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-config-data\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.178781 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-combined-ca-bundle\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.281255 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-config-data\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.281314 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-combined-ca-bundle\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.282221 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-scripts\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.282316 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8khw7\" (UniqueName: \"kubernetes.io/projected/40eac315-cbc0-4b34-b276-bb81c3df7afe-kube-api-access-8khw7\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.286528 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-combined-ca-bundle\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.288714 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-config-data\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.291469 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-scripts\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.301643 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"15b58b86-fb90-42d4-8818-774def6f7b1c","Type":"ContainerStarted","Data":"ca8e25ded78455d0975dbda4a09d92d903c26eb572a40358f5736f1717fff9b0"}
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.304718 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8khw7\" (UniqueName: \"kubernetes.io/projected/40eac315-cbc0-4b34-b276-bb81c3df7afe-kube-api-access-8khw7\") pod \"aodh-db-sync-4mhs8\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") " pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.396522 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:33 crc kubenswrapper[4861]: I0129 08:20:33.863878 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-4mhs8"]
Jan 29 08:20:33 crc kubenswrapper[4861]: W0129 08:20:33.868370 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40eac315_cbc0_4b34_b276_bb81c3df7afe.slice/crio-57292fb26bd1402ffa50e7807dd1e3857a0db1915fc48c6977426fcfc26a69f5 WatchSource:0}: Error finding container 57292fb26bd1402ffa50e7807dd1e3857a0db1915fc48c6977426fcfc26a69f5: Status 404 returned error can't find the container with id 57292fb26bd1402ffa50e7807dd1e3857a0db1915fc48c6977426fcfc26a69f5
Jan 29 08:20:34 crc kubenswrapper[4861]: I0129 08:20:34.312950 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-4mhs8" event={"ID":"40eac315-cbc0-4b34-b276-bb81c3df7afe","Type":"ContainerStarted","Data":"57292fb26bd1402ffa50e7807dd1e3857a0db1915fc48c6977426fcfc26a69f5"}
Jan 29 08:20:36 crc kubenswrapper[4861]: I0129 08:20:36.392714 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"15b58b86-fb90-42d4-8818-774def6f7b1c","Type":"ContainerStarted","Data":"1025613700a042f4216ed9da6517f253c5d3e923ebd1f8c8e20af990b96de5dc"}
Jan 29 08:20:39 crc kubenswrapper[4861]: I0129 08:20:39.426948 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"15b58b86-fb90-42d4-8818-774def6f7b1c","Type":"ContainerStarted","Data":"2df2567489d84c4316a6734f711a626afe8f66dae04e3408c54b2968dfc9679c"}
Jan 29 08:20:39 crc kubenswrapper[4861]: I0129 08:20:39.431007 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-4mhs8" event={"ID":"40eac315-cbc0-4b34-b276-bb81c3df7afe","Type":"ContainerStarted","Data":"17f8da8584b2c53558311b3de4186a2ffd5b8417b345d14e1571c8fad788ee07"}
Jan 29 08:20:39 crc kubenswrapper[4861]: I0129 08:20:39.461843 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=18.461824441 podStartE2EDuration="18.461824441s" podCreationTimestamp="2026-01-29 08:20:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:20:39.452375412 +0000 UTC m=+6331.123870019" watchObservedRunningTime="2026-01-29 08:20:39.461824441 +0000 UTC m=+6331.133319008"
Jan 29 08:20:39 crc kubenswrapper[4861]: I0129 08:20:39.478474 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-4mhs8" podStartSLOduration=1.4451958679999999 podStartE2EDuration="6.478456448s" podCreationTimestamp="2026-01-29 08:20:33 +0000 UTC" firstStartedPulling="2026-01-29 08:20:33.87092927 +0000 UTC m=+6325.542423827" lastFinishedPulling="2026-01-29 08:20:38.90418985 +0000 UTC m=+6330.575684407" observedRunningTime="2026-01-29 08:20:39.473668932 +0000 UTC m=+6331.145163499" watchObservedRunningTime="2026-01-29 08:20:39.478456448 +0000 UTC m=+6331.149951005"
Jan 29 08:20:41 crc kubenswrapper[4861]: I0129 08:20:41.537962 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:20:42 crc kubenswrapper[4861]: I0129 08:20:42.464952 4861 generic.go:334] "Generic (PLEG): container finished" podID="40eac315-cbc0-4b34-b276-bb81c3df7afe" containerID="17f8da8584b2c53558311b3de4186a2ffd5b8417b345d14e1571c8fad788ee07" exitCode=0
Jan 29 08:20:42 crc kubenswrapper[4861]: I0129 08:20:42.465065 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-4mhs8" event={"ID":"40eac315-cbc0-4b34-b276-bb81c3df7afe","Type":"ContainerDied","Data":"17f8da8584b2c53558311b3de4186a2ffd5b8417b345d14e1571c8fad788ee07"}
Jan 29 08:20:43 crc kubenswrapper[4861]: I0129 08:20:43.972643 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.049431 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-7bed-account-create-update-zj98v"]
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.059397 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-h4h6l"]
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.069849 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-7bed-account-create-update-zj98v"]
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.078484 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-h4h6l"]
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.131725 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-config-data\") pod \"40eac315-cbc0-4b34-b276-bb81c3df7afe\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") "
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.132266 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8khw7\" (UniqueName: \"kubernetes.io/projected/40eac315-cbc0-4b34-b276-bb81c3df7afe-kube-api-access-8khw7\") pod \"40eac315-cbc0-4b34-b276-bb81c3df7afe\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") "
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.132546 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-combined-ca-bundle\") pod \"40eac315-cbc0-4b34-b276-bb81c3df7afe\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") "
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.132593 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-scripts\") pod \"40eac315-cbc0-4b34-b276-bb81c3df7afe\" (UID: \"40eac315-cbc0-4b34-b276-bb81c3df7afe\") "
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.141542 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-scripts" (OuterVolumeSpecName: "scripts") pod "40eac315-cbc0-4b34-b276-bb81c3df7afe" (UID: "40eac315-cbc0-4b34-b276-bb81c3df7afe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.144270 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40eac315-cbc0-4b34-b276-bb81c3df7afe-kube-api-access-8khw7" (OuterVolumeSpecName: "kube-api-access-8khw7") pod "40eac315-cbc0-4b34-b276-bb81c3df7afe" (UID: "40eac315-cbc0-4b34-b276-bb81c3df7afe"). InnerVolumeSpecName "kube-api-access-8khw7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.164342 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "40eac315-cbc0-4b34-b276-bb81c3df7afe" (UID: "40eac315-cbc0-4b34-b276-bb81c3df7afe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.170269 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-config-data" (OuterVolumeSpecName: "config-data") pod "40eac315-cbc0-4b34-b276-bb81c3df7afe" (UID: "40eac315-cbc0-4b34-b276-bb81c3df7afe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.234992 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8khw7\" (UniqueName: \"kubernetes.io/projected/40eac315-cbc0-4b34-b276-bb81c3df7afe-kube-api-access-8khw7\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.235024 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.235033 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.235046 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40eac315-cbc0-4b34-b276-bb81c3df7afe-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.490448 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-4mhs8" event={"ID":"40eac315-cbc0-4b34-b276-bb81c3df7afe","Type":"ContainerDied","Data":"57292fb26bd1402ffa50e7807dd1e3857a0db1915fc48c6977426fcfc26a69f5"}
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.490743 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57292fb26bd1402ffa50e7807dd1e3857a0db1915fc48c6977426fcfc26a69f5"
Jan 29 08:20:44 crc kubenswrapper[4861]: I0129 08:20:44.490841 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-4mhs8"
Jan 29 08:20:45 crc kubenswrapper[4861]: I0129 08:20:45.133592 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6343814f-8268-4820-98ca-ed7d2d2f33d0" path="/var/lib/kubelet/pods/6343814f-8268-4820-98ca-ed7d2d2f33d0/volumes"
Jan 29 08:20:45 crc kubenswrapper[4861]: I0129 08:20:45.134183 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cffe5493-2381-493d-85b9-07b863ae8d2c" path="/var/lib/kubelet/pods/cffe5493-2381-493d-85b9-07b863ae8d2c/volumes"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.625354 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"]
Jan 29 08:20:47 crc kubenswrapper[4861]: E0129 08:20:47.626201 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40eac315-cbc0-4b34-b276-bb81c3df7afe" containerName="aodh-db-sync"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.626215 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="40eac315-cbc0-4b34-b276-bb81c3df7afe" containerName="aodh-db-sync"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.626439 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="40eac315-cbc0-4b34-b276-bb81c3df7afe" containerName="aodh-db-sync"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.628773 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.631142 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.631615 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-jnvc4"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.631698 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.653046 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.824699 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-scripts\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.824780 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-combined-ca-bundle\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.824818 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-config-data\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.824845 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksl2r\" (UniqueName: \"kubernetes.io/projected/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-kube-api-access-ksl2r\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.927528 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-scripts\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.927620 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-combined-ca-bundle\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.927653 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-config-data\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.927677 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksl2r\" (UniqueName: \"kubernetes.io/projected/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-kube-api-access-ksl2r\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.950159 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-config-data\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.968825 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-scripts\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.969142 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksl2r\" (UniqueName: \"kubernetes.io/projected/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-kube-api-access-ksl2r\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:47 crc kubenswrapper[4861]: I0129 08:20:47.969658 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-combined-ca-bundle\") pod \"aodh-0\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " pod="openstack/aodh-0"
Jan 29 08:20:48 crc kubenswrapper[4861]: I0129 08:20:48.144040 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Jan 29 08:20:48 crc kubenswrapper[4861]: I0129 08:20:48.250021 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 29 08:20:48 crc kubenswrapper[4861]: I0129 08:20:48.814503 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Jan 29 08:20:49 crc kubenswrapper[4861]: I0129 08:20:49.531980 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f","Type":"ContainerStarted","Data":"c695a0378d3f2faf0bd85300df7bd8a7d5c1ee3a8e26ebfed5f3efb333d74953"}
Jan 29 08:20:50 crc kubenswrapper[4861]: I0129 08:20:50.551834 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f","Type":"ContainerStarted","Data":"ef261af6becae1850cc3db765380ff8fa6ecc9eebc06e337ed4d9895df7df9cf"}
Jan 29 08:20:50 crc kubenswrapper[4861]: I0129 08:20:50.647667 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 08:20:50 crc kubenswrapper[4861]: I0129 08:20:50.647926 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="ceilometer-central-agent" containerID="cri-o://bfd7f1dd95a92935d4193a7580f4cee19713397945f49600a3147ef043cc4405" gracePeriod=30
Jan 29 08:20:50 crc kubenswrapper[4861]: I0129 08:20:50.647986 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="proxy-httpd" containerID="cri-o://813ffc1389cbe4e8b254f3294b8768fc32fee7e2ea1518eea345825ed00d8386" gracePeriod=30
Jan 29 08:20:50 crc kubenswrapper[4861]: I0129 08:20:50.648048 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="ceilometer-notification-agent" containerID="cri-o://16d1f78b65bb96f8bfc3043acff0a3cbe3f8e3201c22a9c500a3466d98683326" gracePeriod=30
Jan 29 08:20:50 crc kubenswrapper[4861]: I0129 08:20:50.648062 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="sg-core" containerID="cri-o://f3230a98fae560b0683307ccbfe2462bf7f0d5c64771c336d8439cd2812b8319" gracePeriod=30
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.031783 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-vgw88"]
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.040303 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-vgw88"]
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.129761 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63a88d68-4cb1-4baa-b8a3-5568f520818e" path="/var/lib/kubelet/pods/63a88d68-4cb1-4baa-b8a3-5568f520818e/volumes"
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.356210 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"]
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.546295 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.567402 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.616479 4861 generic.go:334] "Generic (PLEG): container finished" podID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerID="813ffc1389cbe4e8b254f3294b8768fc32fee7e2ea1518eea345825ed00d8386" exitCode=0
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.616511 4861 generic.go:334] "Generic (PLEG): container finished" podID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerID="f3230a98fae560b0683307ccbfe2462bf7f0d5c64771c336d8439cd2812b8319" exitCode=2
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.616519 4861 generic.go:334] "Generic (PLEG): container finished" podID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerID="bfd7f1dd95a92935d4193a7580f4cee19713397945f49600a3147ef043cc4405" exitCode=0
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.616676 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b","Type":"ContainerDied","Data":"813ffc1389cbe4e8b254f3294b8768fc32fee7e2ea1518eea345825ed00d8386"}
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.616717 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b","Type":"ContainerDied","Data":"f3230a98fae560b0683307ccbfe2462bf7f0d5c64771c336d8439cd2812b8319"}
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.616728 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b","Type":"ContainerDied","Data":"bfd7f1dd95a92935d4193a7580f4cee19713397945f49600a3147ef043cc4405"}
Jan 29 08:20:51 crc kubenswrapper[4861]: I0129 08:20:51.639041 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0"
Jan 29 08:20:52 crc kubenswrapper[4861]: I0129 08:20:52.631036 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f","Type":"ContainerStarted","Data":"17f0e10e4739deeb410e96824d859c3b1d5e0335d0547806b989d3c945a19ee8"}
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.647778 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f","Type":"ContainerStarted","Data":"4b2e7e64ba8edbd065eb932e07bd509566ed6dc855516c8c71b1fd5e29b7606e"}
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.655246 4861 generic.go:334] "Generic (PLEG): container finished" podID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerID="16d1f78b65bb96f8bfc3043acff0a3cbe3f8e3201c22a9c500a3466d98683326" exitCode=0
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.655289 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b","Type":"ContainerDied","Data":"16d1f78b65bb96f8bfc3043acff0a3cbe3f8e3201c22a9c500a3466d98683326"}
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.843928 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.968500 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-config-data\") pod \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") "
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.968585 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-sg-core-conf-yaml\") pod \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") "
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.968707 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-run-httpd\") pod \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") "
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.968778 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-combined-ca-bundle\") pod \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") "
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.968831 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-scripts\") pod \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") "
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.968862 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-log-httpd\") pod \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") "
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.968882 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgdj8\" (UniqueName: \"kubernetes.io/projected/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-kube-api-access-fgdj8\") pod \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\" (UID: \"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b\") "
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.969277 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" (UID: "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.969647 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.970123 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" (UID: "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.977927 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-scripts" (OuterVolumeSpecName: "scripts") pod "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" (UID: "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:20:53 crc kubenswrapper[4861]: I0129 08:20:53.981017 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-kube-api-access-fgdj8" (OuterVolumeSpecName: "kube-api-access-fgdj8") pod "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" (UID: "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b"). InnerVolumeSpecName "kube-api-access-fgdj8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.014063 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" (UID: "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.071664 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" (UID: "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.072870 4861 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.072901 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.072915 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.072926 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.072940 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgdj8\" (UniqueName: \"kubernetes.io/projected/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-kube-api-access-fgdj8\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.137160 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-config-data" (OuterVolumeSpecName: "config-data") pod "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" (UID: "c5d7dd78-a16b-43a4-9e07-e943cfe55a5b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.175128 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.695743 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d7dd78-a16b-43a4-9e07-e943cfe55a5b","Type":"ContainerDied","Data":"f81bbf41c3fe930a0f59561b4529225d91ffc76845f1aa0a47a76e35144609fd"}
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.696037 4861 scope.go:117] "RemoveContainer" containerID="813ffc1389cbe4e8b254f3294b8768fc32fee7e2ea1518eea345825ed00d8386"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.696187 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.749144 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.759398 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.780245 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 29 08:20:54 crc kubenswrapper[4861]: E0129 08:20:54.780909 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="ceilometer-central-agent"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.780929 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="ceilometer-central-agent"
Jan 29 08:20:54 crc kubenswrapper[4861]: E0129 08:20:54.780943 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="sg-core"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.780951 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="sg-core"
Jan 29 08:20:54 crc kubenswrapper[4861]: E0129 08:20:54.780980 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="proxy-httpd"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.780987 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="proxy-httpd"
Jan 29 08:20:54 crc kubenswrapper[4861]: E0129 08:20:54.780999 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="ceilometer-notification-agent"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.781005 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="ceilometer-notification-agent"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.781216 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="proxy-httpd"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.781237 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="ceilometer-central-agent"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.781248 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="ceilometer-notification-agent"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.781265 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" containerName="sg-core"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.783549 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.786702 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.789150 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.794581 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.908137 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-log-httpd\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.908192 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-config-data\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.908239 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-run-httpd\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.908352 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94gzn\" (UniqueName: \"kubernetes.io/projected/5576e837-8033-4a6c-8ad8-9ddc7477846f-kube-api-access-94gzn\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.908376 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.908414 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-scripts\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:54 crc kubenswrapper[4861]: I0129 08:20:54.908460 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.010336 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94gzn\" (UniqueName: \"kubernetes.io/projected/5576e837-8033-4a6c-8ad8-9ddc7477846f-kube-api-access-94gzn\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.010388 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.010436 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-scripts\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.010495 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.010578 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-log-httpd\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.010606 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-config-data\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.010647 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-run-httpd\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.011439 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-run-httpd\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.013855 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-log-httpd\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.018456 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-scripts\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.020205 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.026719 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.027246 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-config-data\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.028828 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94gzn\" (UniqueName: \"kubernetes.io/projected/5576e837-8033-4a6c-8ad8-9ddc7477846f-kube-api-access-94gzn\") pod \"ceilometer-0\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.119458 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.132960 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5d7dd78-a16b-43a4-9e07-e943cfe55a5b" path="/var/lib/kubelet/pods/c5d7dd78-a16b-43a4-9e07-e943cfe55a5b/volumes"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.193840 4861 scope.go:117] "RemoveContainer" containerID="f3230a98fae560b0683307ccbfe2462bf7f0d5c64771c336d8439cd2812b8319"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.240555 4861 scope.go:117] "RemoveContainer" containerID="16d1f78b65bb96f8bfc3043acff0a3cbe3f8e3201c22a9c500a3466d98683326"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.352910 4861 scope.go:117] "RemoveContainer" containerID="bfd7f1dd95a92935d4193a7580f4cee19713397945f49600a3147ef043cc4405"
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.707613 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f","Type":"ContainerStarted","Data":"71c86157784368869146a22be5d88161ba994f35fb4fcb70e45efd99c6142001"}
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.707736 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-api" containerID="cri-o://ef261af6becae1850cc3db765380ff8fa6ecc9eebc06e337ed4d9895df7df9cf" gracePeriod=30
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.707758 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-listener" containerID="cri-o://71c86157784368869146a22be5d88161ba994f35fb4fcb70e45efd99c6142001" gracePeriod=30
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.707913 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-notifier" containerID="cri-o://4b2e7e64ba8edbd065eb932e07bd509566ed6dc855516c8c71b1fd5e29b7606e" gracePeriod=30
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.707927 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-evaluator" containerID="cri-o://17f0e10e4739deeb410e96824d859c3b1d5e0335d0547806b989d3c945a19ee8" gracePeriod=30
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.734642 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.290333909 podStartE2EDuration="8.734625953s" podCreationTimestamp="2026-01-29 08:20:47 +0000 UTC" firstStartedPulling="2026-01-29 08:20:48.809388146 +0000 UTC m=+6340.480882703" lastFinishedPulling="2026-01-29 08:20:55.25368019 +0000 UTC m=+6346.925174747" observedRunningTime="2026-01-29 08:20:55.726363426 +0000 UTC m=+6347.397857983" watchObservedRunningTime="2026-01-29 08:20:55.734625953 +0000 UTC m=+6347.406120510"
Jan 29 08:20:55 crc kubenswrapper[4861]: W0129 08:20:55.844470 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5576e837_8033_4a6c_8ad8_9ddc7477846f.slice/crio-455c4b9c1fd940668158d2e073664c3280f3b58f24f92e625570160e6780cdab WatchSource:0}: Error finding container 455c4b9c1fd940668158d2e073664c3280f3b58f24f92e625570160e6780cdab: Status 404 returned error can't find the container with id 455c4b9c1fd940668158d2e073664c3280f3b58f24f92e625570160e6780cdab
Jan 29 08:20:55 crc kubenswrapper[4861]: I0129 08:20:55.861511 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.252855 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.255137 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30" containerName="kube-state-metrics" containerID="cri-o://79ee53fc25068d936388e588a9b0c08c84d421fdd27be05c522e1a9fb076467e" gracePeriod=30
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.722731 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5576e837-8033-4a6c-8ad8-9ddc7477846f","Type":"ContainerStarted","Data":"455c4b9c1fd940668158d2e073664c3280f3b58f24f92e625570160e6780cdab"}
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.725259 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30" containerID="79ee53fc25068d936388e588a9b0c08c84d421fdd27be05c522e1a9fb076467e" exitCode=2
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.725300 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30","Type":"ContainerDied","Data":"79ee53fc25068d936388e588a9b0c08c84d421fdd27be05c522e1a9fb076467e"}
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.733529 4861 generic.go:334] "Generic (PLEG): container finished" podID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerID="4b2e7e64ba8edbd065eb932e07bd509566ed6dc855516c8c71b1fd5e29b7606e" exitCode=0
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.733562 4861 generic.go:334] "Generic (PLEG): container finished" podID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerID="17f0e10e4739deeb410e96824d859c3b1d5e0335d0547806b989d3c945a19ee8" exitCode=0
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.733570 4861 generic.go:334] "Generic (PLEG): container finished" podID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerID="ef261af6becae1850cc3db765380ff8fa6ecc9eebc06e337ed4d9895df7df9cf" exitCode=0
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.733587 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f","Type":"ContainerDied","Data":"4b2e7e64ba8edbd065eb932e07bd509566ed6dc855516c8c71b1fd5e29b7606e"}
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.733608 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f","Type":"ContainerDied","Data":"17f0e10e4739deeb410e96824d859c3b1d5e0335d0547806b989d3c945a19ee8"}
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.733616 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f","Type":"ContainerDied","Data":"ef261af6becae1850cc3db765380ff8fa6ecc9eebc06e337ed4d9895df7df9cf"}
Jan 29 08:20:56 crc kubenswrapper[4861]: I0129 08:20:56.974963 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.082568 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xgbh\" (UniqueName: \"kubernetes.io/projected/5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30-kube-api-access-6xgbh\") pod \"5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30\" (UID: \"5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30\") "
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.090094 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30-kube-api-access-6xgbh" (OuterVolumeSpecName: "kube-api-access-6xgbh") pod "5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30" (UID: "5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30"). InnerVolumeSpecName "kube-api-access-6xgbh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.185183 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xgbh\" (UniqueName: \"kubernetes.io/projected/5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30-kube-api-access-6xgbh\") on node \"crc\" DevicePath \"\""
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.745441 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5576e837-8033-4a6c-8ad8-9ddc7477846f","Type":"ContainerStarted","Data":"02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463"}
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.784375 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30","Type":"ContainerDied","Data":"e8dbd23e592130d9f9e0aa16166488307f71f2c9100a85604fa7cf54d50b4bde"}
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.784437 4861 scope.go:117] "RemoveContainer" containerID="79ee53fc25068d936388e588a9b0c08c84d421fdd27be05c522e1a9fb076467e"
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.784598 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.841544 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.856893 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.886468 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 29 08:20:57 crc kubenswrapper[4861]: E0129 08:20:57.886957 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30" containerName="kube-state-metrics"
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.886970 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30" containerName="kube-state-metrics"
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.887181 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30" containerName="kube-state-metrics"
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.897785 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.897947 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.901759 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Jan 29 08:20:57 crc kubenswrapper[4861]: I0129 08:20:57.907086 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.004010 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/50409933-4a24-4832-afbf-bdf93efe7de7-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.004347 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/50409933-4a24-4832-afbf-bdf93efe7de7-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.004803 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50409933-4a24-4832-afbf-bdf93efe7de7-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.004902 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz6fl\" (UniqueName: \"kubernetes.io/projected/50409933-4a24-4832-afbf-bdf93efe7de7-kube-api-access-dz6fl\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.106595 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/50409933-4a24-4832-afbf-bdf93efe7de7-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.106667 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/50409933-4a24-4832-afbf-bdf93efe7de7-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.106792 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50409933-4a24-4832-afbf-bdf93efe7de7-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.106818 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dz6fl\" (UniqueName: \"kubernetes.io/projected/50409933-4a24-4832-afbf-bdf93efe7de7-kube-api-access-dz6fl\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.111933 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/50409933-4a24-4832-afbf-bdf93efe7de7-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.116870 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50409933-4a24-4832-afbf-bdf93efe7de7-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.119478 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/50409933-4a24-4832-afbf-bdf93efe7de7-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.126317 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dz6fl\" (UniqueName: \"kubernetes.io/projected/50409933-4a24-4832-afbf-bdf93efe7de7-kube-api-access-dz6fl\") pod \"kube-state-metrics-0\" (UID: \"50409933-4a24-4832-afbf-bdf93efe7de7\") " pod="openstack/kube-state-metrics-0"
Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.229598 4861 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.565283 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.762028 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.853088 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"50409933-4a24-4832-afbf-bdf93efe7de7","Type":"ContainerStarted","Data":"9e93054a88e46fbd76b6c2e95a99b3196685c65882b6106aecdd2a61079f966c"} Jan 29 08:20:58 crc kubenswrapper[4861]: I0129 08:20:58.863673 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5576e837-8033-4a6c-8ad8-9ddc7477846f","Type":"ContainerStarted","Data":"a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d"} Jan 29 08:20:59 crc kubenswrapper[4861]: I0129 08:20:59.127771 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30" path="/var/lib/kubelet/pods/5fc9ddf8-70ac-4a50-992b-1fdd2d7b6e30/volumes" Jan 29 08:20:59 crc kubenswrapper[4861]: I0129 08:20:59.876646 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"50409933-4a24-4832-afbf-bdf93efe7de7","Type":"ContainerStarted","Data":"f515c69e5a28363b9529b76b884566eaf4cdf7f770654d52a3132208cbdb26b0"} Jan 29 08:20:59 crc kubenswrapper[4861]: I0129 08:20:59.877101 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 29 08:20:59 crc kubenswrapper[4861]: I0129 08:20:59.879093 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5576e837-8033-4a6c-8ad8-9ddc7477846f","Type":"ContainerStarted","Data":"1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13"} Jan 29 08:20:59 crc kubenswrapper[4861]: I0129 08:20:59.900250 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.418464301 podStartE2EDuration="2.900230436s" podCreationTimestamp="2026-01-29 08:20:57 +0000 UTC" firstStartedPulling="2026-01-29 08:20:58.786983448 +0000 UTC m=+6350.458478005" lastFinishedPulling="2026-01-29 08:20:59.268749563 +0000 UTC m=+6350.940244140" observedRunningTime="2026-01-29 08:20:59.89849828 +0000 UTC m=+6351.569992857" watchObservedRunningTime="2026-01-29 08:20:59.900230436 +0000 UTC m=+6351.571724993" Jan 29 08:21:00 crc kubenswrapper[4861]: I0129 08:21:00.630985 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:21:00 crc kubenswrapper[4861]: I0129 08:21:00.631407 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:21:00 crc kubenswrapper[4861]: I0129 08:21:00.897060 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"5576e837-8033-4a6c-8ad8-9ddc7477846f","Type":"ContainerStarted","Data":"6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e"} Jan 29 08:21:00 crc kubenswrapper[4861]: I0129 08:21:00.897458 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 08:21:00 crc kubenswrapper[4861]: I0129 08:21:00.897131 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="ceilometer-notification-agent" containerID="cri-o://a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d" gracePeriod=30 Jan 29 08:21:00 crc kubenswrapper[4861]: I0129 08:21:00.897103 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="ceilometer-central-agent" containerID="cri-o://02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463" gracePeriod=30 Jan 29 08:21:00 crc kubenswrapper[4861]: I0129 08:21:00.897135 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="sg-core" containerID="cri-o://1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13" gracePeriod=30 Jan 29 08:21:00 crc kubenswrapper[4861]: I0129 08:21:00.897184 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="proxy-httpd" containerID="cri-o://6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e" gracePeriod=30 Jan 29 08:21:00 crc kubenswrapper[4861]: I0129 08:21:00.925905 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.22065287 podStartE2EDuration="6.92587794s" podCreationTimestamp="2026-01-29 08:20:54 +0000 UTC" firstStartedPulling="2026-01-29 08:20:55.850220114 +0000 UTC m=+6347.521714671" lastFinishedPulling="2026-01-29 08:21:00.555445144 +0000 UTC m=+6352.226939741" observedRunningTime="2026-01-29 08:21:00.919549633 +0000 UTC m=+6352.591044220" watchObservedRunningTime="2026-01-29 08:21:00.92587794 +0000 UTC m=+6352.597372537" Jan 29 08:21:01 crc kubenswrapper[4861]: I0129 08:21:01.908351 4861 generic.go:334] "Generic (PLEG): container finished" podID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerID="1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13" exitCode=2 Jan 29 08:21:01 crc kubenswrapper[4861]: I0129 08:21:01.908382 4861 generic.go:334] "Generic (PLEG): container finished" podID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerID="a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d" exitCode=0 Jan 29 08:21:01 crc kubenswrapper[4861]: I0129 08:21:01.908388 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5576e837-8033-4a6c-8ad8-9ddc7477846f","Type":"ContainerDied","Data":"1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13"} Jan 29 08:21:01 crc kubenswrapper[4861]: I0129 08:21:01.908437 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5576e837-8033-4a6c-8ad8-9ddc7477846f","Type":"ContainerDied","Data":"a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d"} Jan 29 08:21:05 crc kubenswrapper[4861]: I0129 08:21:05.950976 4861 generic.go:334] "Generic (PLEG): container finished" 
podID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerID="02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463" exitCode=0 Jan 29 08:21:05 crc kubenswrapper[4861]: I0129 08:21:05.951085 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5576e837-8033-4a6c-8ad8-9ddc7477846f","Type":"ContainerDied","Data":"02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463"} Jan 29 08:21:08 crc kubenswrapper[4861]: I0129 08:21:08.241178 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 29 08:21:25 crc kubenswrapper[4861]: I0129 08:21:25.130090 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.193992 4861 generic.go:334] "Generic (PLEG): container finished" podID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerID="71c86157784368869146a22be5d88161ba994f35fb4fcb70e45efd99c6142001" exitCode=137 Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.194178 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f","Type":"ContainerDied","Data":"71c86157784368869146a22be5d88161ba994f35fb4fcb70e45efd99c6142001"} Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.325661 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.373634 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-config-data\") pod \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.373761 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-scripts\") pod \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.373817 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ksl2r\" (UniqueName: \"kubernetes.io/projected/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-kube-api-access-ksl2r\") pod \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.373950 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-combined-ca-bundle\") pod \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\" (UID: \"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f\") " Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.379841 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-scripts" (OuterVolumeSpecName: "scripts") pod "f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" (UID: "f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.415285 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-kube-api-access-ksl2r" (OuterVolumeSpecName: "kube-api-access-ksl2r") pod "f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" (UID: "f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f"). InnerVolumeSpecName "kube-api-access-ksl2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.478053 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.478106 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ksl2r\" (UniqueName: \"kubernetes.io/projected/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-kube-api-access-ksl2r\") on node \"crc\" DevicePath \"\"" Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.499205 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-config-data" (OuterVolumeSpecName: "config-data") pod "f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" (UID: "f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.528377 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" (UID: "f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.580383 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:21:26 crc kubenswrapper[4861]: I0129 08:21:26.580726 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.210145 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f","Type":"ContainerDied","Data":"c695a0378d3f2faf0bd85300df7bd8a7d5c1ee3a8e26ebfed5f3efb333d74953"} Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.210193 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.210223 4861 scope.go:117] "RemoveContainer" containerID="71c86157784368869146a22be5d88161ba994f35fb4fcb70e45efd99c6142001" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.253403 4861 scope.go:117] "RemoveContainer" containerID="4b2e7e64ba8edbd065eb932e07bd509566ed6dc855516c8c71b1fd5e29b7606e" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.256880 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.284958 4861 scope.go:117] "RemoveContainer" containerID="17f0e10e4739deeb410e96824d859c3b1d5e0335d0547806b989d3c945a19ee8" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.293405 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.302792 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 29 08:21:27 crc kubenswrapper[4861]: E0129 08:21:27.303230 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-api" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.303248 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-api" Jan 29 08:21:27 crc kubenswrapper[4861]: E0129 08:21:27.303257 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-notifier" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.303264 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-notifier" Jan 29 08:21:27 crc kubenswrapper[4861]: E0129 08:21:27.303296 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-evaluator" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.303304 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-evaluator" Jan 29 08:21:27 crc kubenswrapper[4861]: E0129 08:21:27.303317 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-listener" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.303323 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-listener" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.303488 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-listener" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.303503 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-evaluator" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.303518 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-notifier" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.303531 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" containerName="aodh-api" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.305236 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.309097 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-jnvc4" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.309224 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.309340 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.309396 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.310566 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.316947 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.321034 4861 scope.go:117] "RemoveContainer" containerID="ef261af6becae1850cc3db765380ff8fa6ecc9eebc06e337ed4d9895df7df9cf" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.444988 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-config-data\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.445063 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4gbd\" (UniqueName: \"kubernetes.io/projected/9b56ac80-dc8c-477f-b911-c975cc701551-kube-api-access-d4gbd\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.445121 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-combined-ca-bundle\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.445165 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-scripts\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.445216 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-public-tls-certs\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.445616 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-internal-tls-certs\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.548939 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-scripts\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.549044 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-public-tls-certs\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.549115 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-internal-tls-certs\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.549191 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-config-data\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.549260 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4gbd\" (UniqueName: \"kubernetes.io/projected/9b56ac80-dc8c-477f-b911-c975cc701551-kube-api-access-d4gbd\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.549323 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-combined-ca-bundle\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.553376 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-internal-tls-certs\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.553545 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-scripts\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.556618 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-public-tls-certs\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.556633 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-combined-ca-bundle\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.559098 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b56ac80-dc8c-477f-b911-c975cc701551-config-data\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " 
pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.568239 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4gbd\" (UniqueName: \"kubernetes.io/projected/9b56ac80-dc8c-477f-b911-c975cc701551-kube-api-access-d4gbd\") pod \"aodh-0\" (UID: \"9b56ac80-dc8c-477f-b911-c975cc701551\") " pod="openstack/aodh-0" Jan 29 08:21:27 crc kubenswrapper[4861]: I0129 08:21:27.621055 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 29 08:21:28 crc kubenswrapper[4861]: I0129 08:21:28.175856 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 29 08:21:28 crc kubenswrapper[4861]: I0129 08:21:28.224214 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"9b56ac80-dc8c-477f-b911-c975cc701551","Type":"ContainerStarted","Data":"b5c03d692d3cbf85935806ef9dad5afc8dbc0414d275d7b1f200b1ad1ec4a132"} Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.131400 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f" path="/var/lib/kubelet/pods/f6e199d0-0f2e-4410-b22e-0e9f15c2fe3f/volumes" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.239055 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"9b56ac80-dc8c-477f-b911-c975cc701551","Type":"ContainerStarted","Data":"6e74735f5c914f8e4d91e299305b194b5fbd9c4e4e8df12434b412387c5d22fd"} Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.317199 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6fc95dcff5-m224s"] Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.319425 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.321656 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.335133 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6fc95dcff5-m224s"] Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.514273 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-openstack-cell1\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.514916 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdvsv\" (UniqueName: \"kubernetes.io/projected/33caa883-1209-40cc-9c11-598e194f85b0-kube-api-access-mdvsv\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.515113 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-config\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.515393 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-dns-svc\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.515639 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-nb\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.515840 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-sb\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.617441 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-nb\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.617514 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-sb\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: 
\"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.617571 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-openstack-cell1\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.617608 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdvsv\" (UniqueName: \"kubernetes.io/projected/33caa883-1209-40cc-9c11-598e194f85b0-kube-api-access-mdvsv\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.617649 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-config\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.617683 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-dns-svc\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.618732 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-dns-svc\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.619000 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-openstack-cell1\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.619587 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-config\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.620520 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-nb\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.620611 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-sb\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 
08:21:29.636953 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdvsv\" (UniqueName: \"kubernetes.io/projected/33caa883-1209-40cc-9c11-598e194f85b0-kube-api-access-mdvsv\") pod \"dnsmasq-dns-6fc95dcff5-m224s\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") " pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.873858 4861 scope.go:117] "RemoveContainer" containerID="5d3f504535707a2504a5472775f3ab594598e93fece93a6ad7f2384b9bf6e7c4" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.909675 4861 scope.go:117] "RemoveContainer" containerID="f2e3d1cea217dc1a6c7a3acd9d45d94df20482ac02b980207b00a34bd59b039d" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.937580 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" Jan 29 08:21:29 crc kubenswrapper[4861]: I0129 08:21:29.980572 4861 scope.go:117] "RemoveContainer" containerID="3c3bb80d979163a30693a6c0e6f92548937073636ecfe998a868fe7087e329a1" Jan 29 08:21:30 crc kubenswrapper[4861]: I0129 08:21:30.014764 4861 scope.go:117] "RemoveContainer" containerID="491f08d1639fc15e2efeb983d79c23bdd3fbb2e60e8eb7bb339f8b16464c4367" Jan 29 08:21:30 crc kubenswrapper[4861]: I0129 08:21:30.056577 4861 scope.go:117] "RemoveContainer" containerID="d1b4b2579f93de80c6fb7542c2df049f4c23d233ffdb9f97239370ee5449582e" Jan 29 08:21:30 crc kubenswrapper[4861]: I0129 08:21:30.118427 4861 scope.go:117] "RemoveContainer" containerID="1f1ce88188752c974ccb5cb5e01ee7b0fe56ae0bcf19b02a2b6fd59d9e68128a" Jan 29 08:21:30 crc kubenswrapper[4861]: I0129 08:21:30.274384 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"9b56ac80-dc8c-477f-b911-c975cc701551","Type":"ContainerStarted","Data":"1513e1f8097bc6ff27453c29fd88c4d225cf316ed1333f2d36f77ed5ba4c741b"} Jan 29 08:21:30 crc kubenswrapper[4861]: I0129 08:21:30.467659 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6fc95dcff5-m224s"] Jan 29 08:21:30 crc kubenswrapper[4861]: I0129 08:21:30.630521 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:21:30 crc kubenswrapper[4861]: I0129 08:21:30.630570 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:21:30 crc kubenswrapper[4861]: I0129 08:21:30.630612 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 08:21:30 crc kubenswrapper[4861]: I0129 08:21:30.631436 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 08:21:30 crc kubenswrapper[4861]: I0129 08:21:30.631490 4861 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" gracePeriod=600 Jan 29 08:21:30 crc kubenswrapper[4861]: E0129 08:21:30.753609 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.248400 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.304996 4861 generic.go:334] "Generic (PLEG): container finished" podID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerID="6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e" exitCode=137 Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.305126 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.305119 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5576e837-8033-4a6c-8ad8-9ddc7477846f","Type":"ContainerDied","Data":"6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e"} Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.305355 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5576e837-8033-4a6c-8ad8-9ddc7477846f","Type":"ContainerDied","Data":"455c4b9c1fd940668158d2e073664c3280f3b58f24f92e625570160e6780cdab"} Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.305467 4861 scope.go:117] "RemoveContainer" containerID="6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.312719 4861 generic.go:334] "Generic (PLEG): container finished" podID="33caa883-1209-40cc-9c11-598e194f85b0" containerID="045383dc99e9bf709ce8b6a4ef7a0dca3565d853cab9714cb5965f72e45f0517" exitCode=0 Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.312839 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" event={"ID":"33caa883-1209-40cc-9c11-598e194f85b0","Type":"ContainerDied","Data":"045383dc99e9bf709ce8b6a4ef7a0dca3565d853cab9714cb5965f72e45f0517"} Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.312866 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" event={"ID":"33caa883-1209-40cc-9c11-598e194f85b0","Type":"ContainerStarted","Data":"0d5c28d4d52a75da897beecbd697d8232ccde7c4fe4ff52ed3e3dcb927bea52f"} Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.319131 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"9b56ac80-dc8c-477f-b911-c975cc701551","Type":"ContainerStarted","Data":"56c4c176e566bf2139e16c8a1a2943064d35e81f34e643b63b2548586c86835f"} Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.319172 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" 
event={"ID":"9b56ac80-dc8c-477f-b911-c975cc701551","Type":"ContainerStarted","Data":"976bd7258e8ce45dd926ed5126a3a78b8ea6cdd7ed5ff1745c215a95f3153f6e"} Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.358581 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94gzn\" (UniqueName: \"kubernetes.io/projected/5576e837-8033-4a6c-8ad8-9ddc7477846f-kube-api-access-94gzn\") pod \"5576e837-8033-4a6c-8ad8-9ddc7477846f\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.358791 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-run-httpd\") pod \"5576e837-8033-4a6c-8ad8-9ddc7477846f\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.358951 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-combined-ca-bundle\") pod \"5576e837-8033-4a6c-8ad8-9ddc7477846f\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.358974 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-scripts\") pod \"5576e837-8033-4a6c-8ad8-9ddc7477846f\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.358994 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-sg-core-conf-yaml\") pod \"5576e837-8033-4a6c-8ad8-9ddc7477846f\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.359272 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-config-data\") pod \"5576e837-8033-4a6c-8ad8-9ddc7477846f\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.359313 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-log-httpd\") pod \"5576e837-8033-4a6c-8ad8-9ddc7477846f\" (UID: \"5576e837-8033-4a6c-8ad8-9ddc7477846f\") " Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.362986 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5576e837-8033-4a6c-8ad8-9ddc7477846f" (UID: "5576e837-8033-4a6c-8ad8-9ddc7477846f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.364278 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5576e837-8033-4a6c-8ad8-9ddc7477846f" (UID: "5576e837-8033-4a6c-8ad8-9ddc7477846f"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.400824 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" exitCode=0 Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.400873 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b"} Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.402636 4861 scope.go:117] "RemoveContainer" containerID="1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.403797 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5576e837-8033-4a6c-8ad8-9ddc7477846f-kube-api-access-94gzn" (OuterVolumeSpecName: "kube-api-access-94gzn") pod "5576e837-8033-4a6c-8ad8-9ddc7477846f" (UID: "5576e837-8033-4a6c-8ad8-9ddc7477846f"). InnerVolumeSpecName "kube-api-access-94gzn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.404225 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-scripts" (OuterVolumeSpecName: "scripts") pod "5576e837-8033-4a6c-8ad8-9ddc7477846f" (UID: "5576e837-8033-4a6c-8ad8-9ddc7477846f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.411879 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:21:31 crc kubenswrapper[4861]: E0129 08:21:31.412274 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.427396 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=1.650001166 podStartE2EDuration="4.427373116s" podCreationTimestamp="2026-01-29 08:21:27 +0000 UTC" firstStartedPulling="2026-01-29 08:21:28.178549672 +0000 UTC m=+6379.850044239" lastFinishedPulling="2026-01-29 08:21:30.955921642 +0000 UTC m=+6382.627416189" observedRunningTime="2026-01-29 08:21:31.362308034 +0000 UTC m=+6383.033802591" watchObservedRunningTime="2026-01-29 08:21:31.427373116 +0000 UTC m=+6383.098867693" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.461715 4861 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.462049 4861 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.462059 
4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94gzn\" (UniqueName: \"kubernetes.io/projected/5576e837-8033-4a6c-8ad8-9ddc7477846f-kube-api-access-94gzn\") on node \"crc\" DevicePath \"\"" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.462085 4861 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5576e837-8033-4a6c-8ad8-9ddc7477846f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.524277 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5576e837-8033-4a6c-8ad8-9ddc7477846f" (UID: "5576e837-8033-4a6c-8ad8-9ddc7477846f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.564348 4861 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.620238 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5576e837-8033-4a6c-8ad8-9ddc7477846f" (UID: "5576e837-8033-4a6c-8ad8-9ddc7477846f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.620313 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-config-data" (OuterVolumeSpecName: "config-data") pod "5576e837-8033-4a6c-8ad8-9ddc7477846f" (UID: "5576e837-8033-4a6c-8ad8-9ddc7477846f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.667761 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.668398 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5576e837-8033-4a6c-8ad8-9ddc7477846f-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.697740 4861 scope.go:117] "RemoveContainer" containerID="a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.717111 4861 scope.go:117] "RemoveContainer" containerID="02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.747631 4861 scope.go:117] "RemoveContainer" containerID="6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e" Jan 29 08:21:31 crc kubenswrapper[4861]: E0129 08:21:31.747957 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e\": container with ID starting with 6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e not found: ID does not exist" containerID="6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.747999 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e"} err="failed to get container status \"6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e\": rpc error: code = NotFound desc = could not find container \"6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e\": container with ID starting with 6624523ea7c6ec20278ec3619f8bdb4ed0bf5fde26ee59e191dd46f3e65c9b7e not found: ID does not exist" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.748023 4861 scope.go:117] "RemoveContainer" containerID="1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13" Jan 29 08:21:31 crc kubenswrapper[4861]: E0129 08:21:31.748323 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13\": container with ID starting with 1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13 not found: ID does not exist" containerID="1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.748350 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13"} err="failed to get container status \"1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13\": rpc error: code = NotFound desc = could not find container \"1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13\": container with ID starting with 1f7512a2d6a5f748e5bbc28d5faa18b9b7266d8c5db2fbe20337270178f8aa13 not found: ID does not exist" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.748370 4861 scope.go:117] "RemoveContainer" 
containerID="a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d" Jan 29 08:21:31 crc kubenswrapper[4861]: E0129 08:21:31.748584 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d\": container with ID starting with a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d not found: ID does not exist" containerID="a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.748606 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d"} err="failed to get container status \"a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d\": rpc error: code = NotFound desc = could not find container \"a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d\": container with ID starting with a507d09ac659af1d1959cbb6e01fb93dfc910a599da18228701cf7c8bc34906d not found: ID does not exist" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.748621 4861 scope.go:117] "RemoveContainer" containerID="02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463" Jan 29 08:21:31 crc kubenswrapper[4861]: E0129 08:21:31.748762 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463\": container with ID starting with 02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463 not found: ID does not exist" containerID="02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.748777 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463"} err="failed to get container status \"02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463\": rpc error: code = NotFound desc = could not find container \"02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463\": container with ID starting with 02c9a478a5e5a586ade26cea2212795e118beab24e381f8e92bdb22d32725463 not found: ID does not exist" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.748788 4861 scope.go:117] "RemoveContainer" containerID="fcca75648caaf02940cb3f4b6284809f6ed607019ae9564066e870fde1b501ad" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.952571 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.969501 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.982497 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 08:21:31 crc kubenswrapper[4861]: E0129 08:21:31.983291 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="ceilometer-central-agent" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.983356 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="ceilometer-central-agent" Jan 29 08:21:31 crc kubenswrapper[4861]: E0129 08:21:31.983370 4861 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="sg-core" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.983377 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="sg-core" Jan 29 08:21:31 crc kubenswrapper[4861]: E0129 08:21:31.983420 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="ceilometer-notification-agent" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.983569 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="ceilometer-notification-agent" Jan 29 08:21:31 crc kubenswrapper[4861]: E0129 08:21:31.983603 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="proxy-httpd" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.983610 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="proxy-httpd" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.984032 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="sg-core" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.984058 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="ceilometer-central-agent" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.984101 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="ceilometer-notification-agent" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.984118 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" containerName="proxy-httpd" Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.992037 4861 util.go:30] "No sandbox for pod can be found. 
Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.997157 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 29 08:21:31 crc kubenswrapper[4861]: I0129 08:21:31.997480 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.003442 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.004459 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.112348 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-scripts\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.112677 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-log-httpd\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.112854 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7l5j\" (UniqueName: \"kubernetes.io/projected/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-kube-api-access-w7l5j\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.112979 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.113097 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.113185 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-config-data\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.113301 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-run-httpd\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.113581 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.221602 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-log-httpd\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.221915 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7l5j\" (UniqueName: \"kubernetes.io/projected/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-kube-api-access-w7l5j\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.222087 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.222194 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.222276 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-config-data\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.222385 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-run-httpd\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.222709 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.222934 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-log-httpd\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.223040 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-scripts\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.225880 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-run-httpd\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.234139 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-scripts\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.247823 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.248119 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-config-data\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.248358 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.265432 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.274352 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7l5j\" (UniqueName: \"kubernetes.io/projected/f3b00fe8-8fc2-4499-8121-fc90f5be37dd-kube-api-access-w7l5j\") pod \"ceilometer-0\" (UID: \"f3b00fe8-8fc2-4499-8121-fc90f5be37dd\") " pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.317708 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.421449 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" event={"ID":"33caa883-1209-40cc-9c11-598e194f85b0","Type":"ContainerStarted","Data":"4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8"}
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.421496 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.447442 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" podStartSLOduration=3.447424412 podStartE2EDuration="3.447424412s" podCreationTimestamp="2026-01-29 08:21:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:21:32.439824862 +0000 UTC m=+6384.111319429" watchObservedRunningTime="2026-01-29 08:21:32.447424412 +0000 UTC m=+6384.118918969"
Jan 29 08:21:32 crc kubenswrapper[4861]: I0129 08:21:32.840578 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 08:21:32 crc kubenswrapper[4861]: W0129 08:21:32.864172 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3b00fe8_8fc2_4499_8121_fc90f5be37dd.slice/crio-d6eb06f36becd35974c1e674670ed8ff0a1e73f05c3bc484d210278c16e74c9a WatchSource:0}: Error finding container d6eb06f36becd35974c1e674670ed8ff0a1e73f05c3bc484d210278c16e74c9a: Status 404 returned error can't find the container with id d6eb06f36becd35974c1e674670ed8ff0a1e73f05c3bc484d210278c16e74c9a
Jan 29 08:21:33 crc kubenswrapper[4861]: I0129 08:21:33.146050 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5576e837-8033-4a6c-8ad8-9ddc7477846f" path="/var/lib/kubelet/pods/5576e837-8033-4a6c-8ad8-9ddc7477846f/volumes"
Jan 29 08:21:33 crc kubenswrapper[4861]: I0129 08:21:33.430428 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b00fe8-8fc2-4499-8121-fc90f5be37dd","Type":"ContainerStarted","Data":"d6eb06f36becd35974c1e674670ed8ff0a1e73f05c3bc484d210278c16e74c9a"}
Jan 29 08:21:34 crc kubenswrapper[4861]: I0129 08:21:34.446683 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b00fe8-8fc2-4499-8121-fc90f5be37dd","Type":"ContainerStarted","Data":"a4f6df5c885f6dbd21990926d6994550c3cac3311d5f1a16c729b4c9d6c9b873"}
Jan 29 08:21:35 crc kubenswrapper[4861]: I0129 08:21:35.460557 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b00fe8-8fc2-4499-8121-fc90f5be37dd","Type":"ContainerStarted","Data":"3a09dc8c45427d207bb49ce8c672575794ad048250e36603e6250c4082dba74a"}
Jan 29 08:21:35 crc kubenswrapper[4861]: I0129 08:21:35.460923 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b00fe8-8fc2-4499-8121-fc90f5be37dd","Type":"ContainerStarted","Data":"75f89a7a8e8c43d5b7aa02ddb29b86f804f3f801451fe87a7b91b84020225ab7"}
Jan 29 08:21:38 crc kubenswrapper[4861]: I0129 08:21:38.496814 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b00fe8-8fc2-4499-8121-fc90f5be37dd","Type":"ContainerStarted","Data":"c765802f057114632b2ca12fde733d309691c24129cbb6689b6f2aa12d915743"}
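
The manager.go:1169 warning above is cAdvisor racing the runtime: it sees a new cgroup appear and asks CRI-O about a container that is not registered yet, gets a 404, and the same container shows up moments later in the PLEG ContainerStarted events. The cgroup path itself is derivable from the pod UID (dashes become underscores inside the systemd slice name), which this entry confirms for pod f3b00fe8-8fc2-4499-8121-fc90f5be37dd. A small helper reproducing the path format seen above (the format string is inferred from this log, not from a documented API):

package main

import (
	"fmt"
	"strings"
)

// besteffortSlice rebuilds the cgroup path cAdvisor watched, as seen in
// the manager.go:1169 entry: pod UID dashes become underscores.
func besteffortSlice(podUID, containerID string) string {
	return fmt.Sprintf(
		"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod%s.slice/crio-%s",
		strings.ReplaceAll(podUID, "-", "_"), containerID)
}

func main() {
	fmt.Println(besteffortSlice(
		"f3b00fe8-8fc2-4499-8121-fc90f5be37dd",
		"d6eb06f36becd35974c1e674670ed8ff0a1e73f05c3bc484d210278c16e74c9a"))
}
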
Jan 29 08:21:38 crc kubenswrapper[4861]: I0129 08:21:38.499493 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 29 08:21:38 crc kubenswrapper[4861]: I0129 08:21:38.530769 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.083612319 podStartE2EDuration="7.530737289s" podCreationTimestamp="2026-01-29 08:21:31 +0000 UTC" firstStartedPulling="2026-01-29 08:21:32.867510754 +0000 UTC m=+6384.539005311" lastFinishedPulling="2026-01-29 08:21:37.314635684 +0000 UTC m=+6388.986130281" observedRunningTime="2026-01-29 08:21:38.521601549 +0000 UTC m=+6390.193096196" watchObservedRunningTime="2026-01-29 08:21:38.530737289 +0000 UTC m=+6390.202231896"
Jan 29 08:21:39 crc kubenswrapper[4861]: I0129 08:21:39.940189 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s"
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.033551 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ddf5b9dd7-m7g85"]
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.033811 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" podUID="fb991421-a937-4891-b21e-cdd66b1675a7" containerName="dnsmasq-dns" containerID="cri-o://0877304d792f3e9b53045562e0c60472b6aec3900804c3d2ffa2a76222b55324" gracePeriod=10
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.286963 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5585ff97bc-f7zkv"]
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.298328 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv"
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.330585 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5585ff97bc-f7zkv"]
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.416714 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-config\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv"
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.416763 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-openstack-cell1\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv"
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.416779 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-ovsdbserver-nb\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv"
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.416826 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wt6jf\" (UniqueName: \"kubernetes.io/projected/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-kube-api-access-wt6jf\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv"
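
The two "Observed pod startup duration" entries are worth decoding. podStartE2EDuration is the watch-observed running time minus podCreationTimestamp: for ceilometer-0, 08:21:38.530737289 − 08:21:31 = 7.530737289s. podStartSLOduration appears to be the E2E duration minus time spent pulling images (lastFinishedPulling − firstStartedPulling = 4.447124930s), giving ≈3.083612s, which matches the logged value to within tens of nanoseconds of clock-read jitter; for the dnsmasq pod the pull timestamps are zero values, so SLO equals E2E. A quick check in Go, with the timestamps copied from the ceilometer-0 entry above:

package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2026-01-29 08:21:31 +0000 UTC")
	firstPull := mustParse("2026-01-29 08:21:32.867510754 +0000 UTC")
	lastPull := mustParse("2026-01-29 08:21:37.314635684 +0000 UTC")
	running := mustParse("2026-01-29 08:21:38.530737289 +0000 UTC")

	e2e := running.Sub(created)
	slo := e2e - lastPull.Sub(firstPull)
	fmt.Println(e2e, slo) // 7.530737289s 3.083612359s (logged: 3.083612319)
}
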
\"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.416869 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-dns-svc\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.416974 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-ovsdbserver-sb\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.515532 4861 generic.go:334] "Generic (PLEG): container finished" podID="fb991421-a937-4891-b21e-cdd66b1675a7" containerID="0877304d792f3e9b53045562e0c60472b6aec3900804c3d2ffa2a76222b55324" exitCode=0 Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.515600 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" event={"ID":"fb991421-a937-4891-b21e-cdd66b1675a7","Type":"ContainerDied","Data":"0877304d792f3e9b53045562e0c60472b6aec3900804c3d2ffa2a76222b55324"} Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.518469 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-dns-svc\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.518618 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-ovsdbserver-sb\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.518665 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-config\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.518687 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-openstack-cell1\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.518703 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-ovsdbserver-nb\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.518743 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wt6jf\" 
(UniqueName: \"kubernetes.io/projected/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-kube-api-access-wt6jf\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.519821 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-dns-svc\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.519985 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-config\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.520896 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-ovsdbserver-sb\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.521047 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-ovsdbserver-nb\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.521279 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-openstack-cell1\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.540654 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wt6jf\" (UniqueName: \"kubernetes.io/projected/ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70-kube-api-access-wt6jf\") pod \"dnsmasq-dns-5585ff97bc-f7zkv\" (UID: \"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70\") " pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.629642 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.729387 4861 util.go:48] "No ready sandbox for pod can be found. 
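
A few entries back the kubelet killed the old dnsmasq container "with a grace period" of 10 seconds, and the PLEG then reported exitCode=0: the process exited on SIGTERM before the deadline, so no SIGKILL was needed. The underlying pattern is TERM-then-KILL. A self-contained sketch of that pattern on a POSIX system (the spawned process and timeout are illustrative; this is not kubelet code):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace sends SIGTERM, waits up to grace, then SIGKILLs.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	cmd.Process.Signal(syscall.SIGTERM)
	select {
	case err := <-done:
		fmt.Println("exited within grace period:", err)
	case <-time.After(grace):
		fmt.Println("grace period expired, sending SIGKILL")
		cmd.Process.Kill()
		<-done
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGrace(cmd, 10*time.Second) // gracePeriod=10, as in the log
}
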
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.825893 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-nb\") pod \"fb991421-a937-4891-b21e-cdd66b1675a7\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") "
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.826849 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-config\") pod \"fb991421-a937-4891-b21e-cdd66b1675a7\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") "
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.826909 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-sb\") pod \"fb991421-a937-4891-b21e-cdd66b1675a7\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") "
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.827015 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-dns-svc\") pod \"fb991421-a937-4891-b21e-cdd66b1675a7\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") "
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.827034 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9m95\" (UniqueName: \"kubernetes.io/projected/fb991421-a937-4891-b21e-cdd66b1675a7-kube-api-access-x9m95\") pod \"fb991421-a937-4891-b21e-cdd66b1675a7\" (UID: \"fb991421-a937-4891-b21e-cdd66b1675a7\") "
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.839083 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb991421-a937-4891-b21e-cdd66b1675a7-kube-api-access-x9m95" (OuterVolumeSpecName: "kube-api-access-x9m95") pod "fb991421-a937-4891-b21e-cdd66b1675a7" (UID: "fb991421-a937-4891-b21e-cdd66b1675a7"). InnerVolumeSpecName "kube-api-access-x9m95". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.879354 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fb991421-a937-4891-b21e-cdd66b1675a7" (UID: "fb991421-a937-4891-b21e-cdd66b1675a7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.884974 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-config" (OuterVolumeSpecName: "config") pod "fb991421-a937-4891-b21e-cdd66b1675a7" (UID: "fb991421-a937-4891-b21e-cdd66b1675a7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.899139 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fb991421-a937-4891-b21e-cdd66b1675a7" (UID: "fb991421-a937-4891-b21e-cdd66b1675a7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.912058 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fb991421-a937-4891-b21e-cdd66b1675a7" (UID: "fb991421-a937-4891-b21e-cdd66b1675a7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.936533 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.936563 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9m95\" (UniqueName: \"kubernetes.io/projected/fb991421-a937-4891-b21e-cdd66b1675a7-kube-api-access-x9m95\") on node \"crc\" DevicePath \"\""
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.936575 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.936585 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-config\") on node \"crc\" DevicePath \"\""
Jan 29 08:21:40 crc kubenswrapper[4861]: I0129 08:21:40.936592 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb991421-a937-4891-b21e-cdd66b1675a7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 29 08:21:41 crc kubenswrapper[4861]: W0129 08:21:41.127861 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded67e6e0_58ef_42d0_8a69_a7f1b0b0da70.slice/crio-55286acc72960c69803e07e1ea82a7968d438983dd4b2ef28a7d8b83b4cd4e60 WatchSource:0}: Error finding container 55286acc72960c69803e07e1ea82a7968d438983dd4b2ef28a7d8b83b4cd4e60: Status 404 returned error can't find the container with id 55286acc72960c69803e07e1ea82a7968d438983dd4b2ef28a7d8b83b4cd4e60
Jan 29 08:21:41 crc kubenswrapper[4861]: I0129 08:21:41.133666 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5585ff97bc-f7zkv"]
Jan 29 08:21:41 crc kubenswrapper[4861]: I0129 08:21:41.525567 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" event={"ID":"fb991421-a937-4891-b21e-cdd66b1675a7","Type":"ContainerDied","Data":"1187989f3b03492bc02ef62fdcfb3689d949a3dfc7d6a1fedbb0ccd5ccc5d617"}
Jan 29 08:21:41 crc kubenswrapper[4861]: I0129 08:21:41.525891 4861 scope.go:117] "RemoveContainer" containerID="0877304d792f3e9b53045562e0c60472b6aec3900804c3d2ffa2a76222b55324"
Jan 29 08:21:41 crc kubenswrapper[4861]: I0129 08:21:41.525584 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85"
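
With entries this dense, it helps to pull one pod's container lifecycle out of the stream. A small filter that extracts the PLEG ContainerStarted/ContainerDied transitions with a regexp; the two sample lines below are abbreviated copies of entries in this log:

package main

import (
	"fmt"
	"regexp"
)

// pleg captures the pod name, event type, and container ID from
// "SyncLoop (PLEG): event for pod" entries.
var pleg = regexp.MustCompile(`pod="([^"]+)" event=\{"ID":"[^"]+","Type":"(Container\w+)","Data":"([0-9a-f]+)"\}`)

func main() {
	lines := []string{
		`... kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ddf5b9dd7-m7g85" event={"ID":"fb991421-a937-4891-b21e-cdd66b1675a7","Type":"ContainerDied","Data":"1187989f3b03492bc02ef62fdcfb3689d949a3dfc7d6a1fedbb0ccd5ccc5d617"}`,
		`... kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" event={"ID":"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70","Type":"ContainerStarted","Data":"55286acc72960c69803e07e1ea82a7968d438983dd4b2ef28a7d8b83b4cd4e60"}`,
	}
	for _, l := range lines {
		if m := pleg.FindStringSubmatch(l); m != nil {
			fmt.Printf("%-45s %-16s %.12s\n", m[1], m[2], m[3])
		}
	}
}
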
Jan 29 08:21:41 crc kubenswrapper[4861]: I0129 08:21:41.532653 4861 generic.go:334] "Generic (PLEG): container finished" podID="ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70" containerID="f427a3f442bfd9a1da3c67872ecabfc02e6bf15ca74c1b68a09f8d11dc8c2b1c" exitCode=0
Jan 29 08:21:41 crc kubenswrapper[4861]: I0129 08:21:41.533539 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" event={"ID":"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70","Type":"ContainerDied","Data":"f427a3f442bfd9a1da3c67872ecabfc02e6bf15ca74c1b68a09f8d11dc8c2b1c"}
Jan 29 08:21:41 crc kubenswrapper[4861]: I0129 08:21:41.533627 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" event={"ID":"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70","Type":"ContainerStarted","Data":"55286acc72960c69803e07e1ea82a7968d438983dd4b2ef28a7d8b83b4cd4e60"}
Jan 29 08:21:41 crc kubenswrapper[4861]: I0129 08:21:41.551698 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ddf5b9dd7-m7g85"]
Jan 29 08:21:41 crc kubenswrapper[4861]: I0129 08:21:41.560161 4861 scope.go:117] "RemoveContainer" containerID="9d80a4c15f31b5c560542498ca3b6784534955c005e9bcceada650f392d0f1af"
Jan 29 08:21:41 crc kubenswrapper[4861]: I0129 08:21:41.565424 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ddf5b9dd7-m7g85"]
Jan 29 08:21:42 crc kubenswrapper[4861]: I0129 08:21:42.547628 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" event={"ID":"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70","Type":"ContainerStarted","Data":"775c644b9b00899658ab6455eb11fa42382d427325c528f000a9f7220dc64327"}
Jan 29 08:21:42 crc kubenswrapper[4861]: I0129 08:21:42.548065 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv"
Jan 29 08:21:42 crc kubenswrapper[4861]: I0129 08:21:42.574503 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv" podStartSLOduration=2.574483126 podStartE2EDuration="2.574483126s" podCreationTimestamp="2026-01-29 08:21:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:21:42.569707931 +0000 UTC m=+6394.241202488" watchObservedRunningTime="2026-01-29 08:21:42.574483126 +0000 UTC m=+6394.245977683"
Jan 29 08:21:43 crc kubenswrapper[4861]: I0129 08:21:43.131017 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb991421-a937-4891-b21e-cdd66b1675a7" path="/var/lib/kubelet/pods/fb991421-a937-4891-b21e-cdd66b1675a7/volumes"
Jan 29 08:21:46 crc kubenswrapper[4861]: I0129 08:21:46.116425 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b"
Jan 29 08:21:46 crc kubenswrapper[4861]: E0129 08:21:46.116937 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:21:50 crc kubenswrapper[4861]: I0129 08:21:50.047614 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-7629-account-create-update-ts6d9"]
Jan 29 08:21:50 crc kubenswrapper[4861]: I0129 08:21:50.059577 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-9ghkj"]
Jan 29 08:21:50 crc kubenswrapper[4861]: I0129 08:21:50.070885 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-wvqhr"]
Jan 29 08:21:50 crc kubenswrapper[4861]: I0129 08:21:50.081849 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-wvqhr"]
Jan 29 08:21:50 crc kubenswrapper[4861]: I0129 08:21:50.093430 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-9ghkj"]
Jan 29 08:21:50 crc kubenswrapper[4861]: I0129 08:21:50.102380 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-7629-account-create-update-ts6d9"]
Jan 29 08:21:50 crc kubenswrapper[4861]: I0129 08:21:50.632718 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5585ff97bc-f7zkv"
Jan 29 08:21:50 crc kubenswrapper[4861]: I0129 08:21:50.700171 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6fc95dcff5-m224s"]
Jan 29 08:21:50 crc kubenswrapper[4861]: I0129 08:21:50.701339 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" podUID="33caa883-1209-40cc-9c11-598e194f85b0" containerName="dnsmasq-dns" containerID="cri-o://4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8" gracePeriod=10
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.071950 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-41c8-account-create-update-w5rm7"]
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.080944 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-62a6-account-create-update-xvkg5"]
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.092744 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-41c8-account-create-update-w5rm7"]
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.103878 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-62a6-account-create-update-xvkg5"]
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.113133 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-tgkdv"]
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.138918 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bf774fb-d1b4-48cf-bd1e-c92e921cad22" path="/var/lib/kubelet/pods/0bf774fb-d1b4-48cf-bd1e-c92e921cad22/volumes"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.140621 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78f9471d-1f05-4771-9988-dd8882646a84" path="/var/lib/kubelet/pods/78f9471d-1f05-4771-9988-dd8882646a84/volumes"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.142331 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98e4a19c-e12f-44b8-9c7c-724acbc661de" path="/var/lib/kubelet/pods/98e4a19c-e12f-44b8-9c7c-724acbc661de/volumes"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.146359 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f072fe6f-71a4-449d-8efe-17d5dad2cd43" path="/var/lib/kubelet/pods/f072fe6f-71a4-449d-8efe-17d5dad2cd43/volumes"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.153267 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8565e37-6564-463f-b39b-9613e4e33d5d" path="/var/lib/kubelet/pods/f8565e37-6564-463f-b39b-9613e4e33d5d/volumes"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.154367 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-tgkdv"]
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.243965 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.292740 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-openstack-cell1\") pod \"33caa883-1209-40cc-9c11-598e194f85b0\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") "
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.292798 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-nb\") pod \"33caa883-1209-40cc-9c11-598e194f85b0\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") "
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.292893 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-config\") pod \"33caa883-1209-40cc-9c11-598e194f85b0\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") "
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.293103 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-dns-svc\") pod \"33caa883-1209-40cc-9c11-598e194f85b0\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") "
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.293150 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-sb\") pod \"33caa883-1209-40cc-9c11-598e194f85b0\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") "
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.293210 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdvsv\" (UniqueName: \"kubernetes.io/projected/33caa883-1209-40cc-9c11-598e194f85b0-kube-api-access-mdvsv\") pod \"33caa883-1209-40cc-9c11-598e194f85b0\" (UID: \"33caa883-1209-40cc-9c11-598e194f85b0\") "
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.323154 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33caa883-1209-40cc-9c11-598e194f85b0-kube-api-access-mdvsv" (OuterVolumeSpecName: "kube-api-access-mdvsv") pod "33caa883-1209-40cc-9c11-598e194f85b0" (UID: "33caa883-1209-40cc-9c11-598e194f85b0"). InnerVolumeSpecName "kube-api-access-mdvsv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.366431 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-config" (OuterVolumeSpecName: "config") pod "33caa883-1209-40cc-9c11-598e194f85b0" (UID: "33caa883-1209-40cc-9c11-598e194f85b0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
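
The burst of "Cleaned up orphaned pod volumes dir" entries is the kubelet's housekeeping pass: once a pod is gone from the API and its volumes are torn down, the per-pod directory under /var/lib/kubelet/pods/<uid>/volumes is removed. A read-only sketch of the scan, which only prints candidates rather than deleting anything (the directory layout matches the paths in the log; the activePods set is illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	active := map[string]bool{
		"ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70": true, // dnsmasq-dns-5585ff97bc-f7zkv
	}
	root := "/var/lib/kubelet/pods"
	entries, err := os.ReadDir(root)
	if err != nil {
		fmt.Println("skipping scan:", err)
		return
	}
	for _, e := range entries {
		if e.IsDir() && !active[e.Name()] {
			// The kubelet would remove this dir once unmounts finish.
			fmt.Println("orphan candidate:", filepath.Join(root, e.Name(), "volumes"))
		}
	}
}
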
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.367897 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "33caa883-1209-40cc-9c11-598e194f85b0" (UID: "33caa883-1209-40cc-9c11-598e194f85b0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.385571 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "33caa883-1209-40cc-9c11-598e194f85b0" (UID: "33caa883-1209-40cc-9c11-598e194f85b0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.391110 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "33caa883-1209-40cc-9c11-598e194f85b0" (UID: "33caa883-1209-40cc-9c11-598e194f85b0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.402607 4861 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.402643 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.402655 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdvsv\" (UniqueName: \"kubernetes.io/projected/33caa883-1209-40cc-9c11-598e194f85b0-kube-api-access-mdvsv\") on node \"crc\" DevicePath \"\""
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.402663 4861 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.402675 4861 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-config\") on node \"crc\" DevicePath \"\""
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.405014 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "33caa883-1209-40cc-9c11-598e194f85b0" (UID: "33caa883-1209-40cc-9c11-598e194f85b0"). InnerVolumeSpecName "openstack-cell1". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.504724 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/33caa883-1209-40cc-9c11-598e194f85b0-openstack-cell1\") on node \"crc\" DevicePath \"\""
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.657470 4861 generic.go:334] "Generic (PLEG): container finished" podID="33caa883-1209-40cc-9c11-598e194f85b0" containerID="4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8" exitCode=0
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.657515 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" event={"ID":"33caa883-1209-40cc-9c11-598e194f85b0","Type":"ContainerDied","Data":"4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8"}
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.657548 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s" event={"ID":"33caa883-1209-40cc-9c11-598e194f85b0","Type":"ContainerDied","Data":"0d5c28d4d52a75da897beecbd697d8232ccde7c4fe4ff52ed3e3dcb927bea52f"}
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.657567 4861 scope.go:117] "RemoveContainer" containerID="4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.657896 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6fc95dcff5-m224s"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.698289 4861 scope.go:117] "RemoveContainer" containerID="045383dc99e9bf709ce8b6a4ef7a0dca3565d853cab9714cb5965f72e45f0517"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.719237 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6fc95dcff5-m224s"]
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.725565 4861 scope.go:117] "RemoveContainer" containerID="4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8"
Jan 29 08:21:51 crc kubenswrapper[4861]: E0129 08:21:51.726002 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8\": container with ID starting with 4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8 not found: ID does not exist" containerID="4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.726036 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8"} err="failed to get container status \"4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8\": rpc error: code = NotFound desc = could not find container \"4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8\": container with ID starting with 4895d76c323c9c7b0ea9790000da434b8bcc360ca1c30b41ddaa945be6a50fe8 not found: ID does not exist"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.726057 4861 scope.go:117] "RemoveContainer" containerID="045383dc99e9bf709ce8b6a4ef7a0dca3565d853cab9714cb5965f72e45f0517"
Jan 29 08:21:51 crc kubenswrapper[4861]: E0129 08:21:51.726303 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"045383dc99e9bf709ce8b6a4ef7a0dca3565d853cab9714cb5965f72e45f0517\": container with ID starting with 045383dc99e9bf709ce8b6a4ef7a0dca3565d853cab9714cb5965f72e45f0517 not found: ID does not exist" containerID="045383dc99e9bf709ce8b6a4ef7a0dca3565d853cab9714cb5965f72e45f0517"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.726324 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"045383dc99e9bf709ce8b6a4ef7a0dca3565d853cab9714cb5965f72e45f0517"} err="failed to get container status \"045383dc99e9bf709ce8b6a4ef7a0dca3565d853cab9714cb5965f72e45f0517\": rpc error: code = NotFound desc = could not find container \"045383dc99e9bf709ce8b6a4ef7a0dca3565d853cab9714cb5965f72e45f0517\": container with ID starting with 045383dc99e9bf709ce8b6a4ef7a0dca3565d853cab9714cb5965f72e45f0517 not found: ID does not exist"
Jan 29 08:21:51 crc kubenswrapper[4861]: I0129 08:21:51.730013 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6fc95dcff5-m224s"]
Jan 29 08:21:51 crc kubenswrapper[4861]: E0129 08:21:51.909982 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33caa883_1209_40cc_9c11_598e194f85b0.slice/crio-0d5c28d4d52a75da897beecbd697d8232ccde7c4fe4ff52ed3e3dcb927bea52f\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33caa883_1209_40cc_9c11_598e194f85b0.slice\": RecentStats: unable to find data in memory cache]"
Jan 29 08:21:53 crc kubenswrapper[4861]: I0129 08:21:53.126705 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33caa883-1209-40cc-9c11-598e194f85b0" path="/var/lib/kubelet/pods/33caa883-1209-40cc-9c11-598e194f85b0/volumes"
Jan 29 08:21:53 crc kubenswrapper[4861]: I0129 08:21:53.127893 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40f7a1a2-bbca-4911-9337-5d2ce1129c21" path="/var/lib/kubelet/pods/40f7a1a2-bbca-4911-9337-5d2ce1129c21/volumes"
Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.036674 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-kfwz9"]
Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.045715 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-kfwz9"]
Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.184034 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw"]
Jan 29 08:22:00 crc kubenswrapper[4861]: E0129 08:22:00.184755 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb991421-a937-4891-b21e-cdd66b1675a7" containerName="init"
Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.184774 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb991421-a937-4891-b21e-cdd66b1675a7" containerName="init"
Jan 29 08:22:00 crc kubenswrapper[4861]: E0129 08:22:00.184789 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33caa883-1209-40cc-9c11-598e194f85b0" containerName="dnsmasq-dns"
Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.184796 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="33caa883-1209-40cc-9c11-598e194f85b0" containerName="dnsmasq-dns"
Jan 29 08:22:00 crc kubenswrapper[4861]: E0129 08:22:00.184818 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb991421-a937-4891-b21e-cdd66b1675a7" containerName="dnsmasq-dns"
podUID="fb991421-a937-4891-b21e-cdd66b1675a7" containerName="dnsmasq-dns" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.184825 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb991421-a937-4891-b21e-cdd66b1675a7" containerName="dnsmasq-dns" Jan 29 08:22:00 crc kubenswrapper[4861]: E0129 08:22:00.184831 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33caa883-1209-40cc-9c11-598e194f85b0" containerName="init" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.184836 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="33caa883-1209-40cc-9c11-598e194f85b0" containerName="init" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.185019 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb991421-a937-4891-b21e-cdd66b1675a7" containerName="dnsmasq-dns" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.185048 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="33caa883-1209-40cc-9c11-598e194f85b0" containerName="dnsmasq-dns" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.185755 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.190386 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.190626 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.190811 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.190916 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.201647 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw"] Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.319026 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.319098 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.319158 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-ssh-key-openstack-cell1\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: 
\"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.319243 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgx24\" (UniqueName: \"kubernetes.io/projected/0494d495-b64c-456c-8424-e929353eea07-kube-api-access-zgx24\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.420677 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.420759 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.420841 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-ssh-key-openstack-cell1\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.420929 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgx24\" (UniqueName: \"kubernetes.io/projected/0494d495-b64c-456c-8424-e929353eea07-kube-api-access-zgx24\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.428679 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.429599 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.433749 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-ssh-key-openstack-cell1\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.454040 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgx24\" (UniqueName: \"kubernetes.io/projected/0494d495-b64c-456c-8424-e929353eea07-kube-api-access-zgx24\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:00 crc kubenswrapper[4861]: I0129 08:22:00.512515 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:01 crc kubenswrapper[4861]: I0129 08:22:01.122372 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:22:01 crc kubenswrapper[4861]: E0129 08:22:01.122854 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:22:01 crc kubenswrapper[4861]: I0129 08:22:01.136464 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb01f98a-c389-43d5-b197-fabbed6a8288" path="/var/lib/kubelet/pods/cb01f98a-c389-43d5-b197-fabbed6a8288/volumes" Jan 29 08:22:01 crc kubenswrapper[4861]: I0129 08:22:01.297382 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw"] Jan 29 08:22:01 crc kubenswrapper[4861]: W0129 08:22:01.310590 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0494d495_b64c_456c_8424_e929353eea07.slice/crio-201c44c3a4705c4b4059869361323bbaf2283369f926d21dd660bdc010c813f7 WatchSource:0}: Error finding container 201c44c3a4705c4b4059869361323bbaf2283369f926d21dd660bdc010c813f7: Status 404 returned error can't find the container with id 201c44c3a4705c4b4059869361323bbaf2283369f926d21dd660bdc010c813f7 Jan 29 08:22:01 crc kubenswrapper[4861]: I0129 08:22:01.762481 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" event={"ID":"0494d495-b64c-456c-8424-e929353eea07","Type":"ContainerStarted","Data":"201c44c3a4705c4b4059869361323bbaf2283369f926d21dd660bdc010c813f7"} Jan 29 08:22:02 crc kubenswrapper[4861]: I0129 08:22:02.329586 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 29 08:22:10 crc kubenswrapper[4861]: I0129 08:22:10.058699 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:22:10 crc kubenswrapper[4861]: I0129 08:22:10.852272 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" event={"ID":"0494d495-b64c-456c-8424-e929353eea07","Type":"ContainerStarted","Data":"bd8214798b6cdf11c02b50cf890db384d6171076e96e2581732fd4159edd8166"} Jan 29 08:22:10 crc kubenswrapper[4861]: I0129 08:22:10.881428 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" podStartSLOduration=2.138093032 podStartE2EDuration="10.881407042s" podCreationTimestamp="2026-01-29 08:22:00 +0000 UTC" firstStartedPulling="2026-01-29 08:22:01.31281784 +0000 UTC m=+6412.984312397" lastFinishedPulling="2026-01-29 08:22:10.05613185 +0000 UTC m=+6421.727626407" observedRunningTime="2026-01-29 08:22:10.876698778 +0000 UTC m=+6422.548193375" watchObservedRunningTime="2026-01-29 08:22:10.881407042 +0000 UTC m=+6422.552901609" Jan 29 08:22:15 crc kubenswrapper[4861]: I0129 08:22:15.118377 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:22:15 crc kubenswrapper[4861]: E0129 08:22:15.120313 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:22:19 crc kubenswrapper[4861]: I0129 08:22:19.053009 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-b924h"] Jan 29 08:22:19 crc kubenswrapper[4861]: I0129 08:22:19.062235 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-b924h"] Jan 29 08:22:19 crc kubenswrapper[4861]: I0129 08:22:19.133514 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51592fcf-703f-426a-b6a0-26404449057b" path="/var/lib/kubelet/pods/51592fcf-703f-426a-b6a0-26404449057b/volumes" Jan 29 08:22:20 crc kubenswrapper[4861]: I0129 08:22:20.033900 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-dz5rn"] Jan 29 08:22:20 crc kubenswrapper[4861]: I0129 08:22:20.043139 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-dz5rn"] Jan 29 08:22:21 crc kubenswrapper[4861]: I0129 08:22:21.136684 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ee4d88f-ac71-43c0-819e-3813cde88dc1" path="/var/lib/kubelet/pods/5ee4d88f-ac71-43c0-819e-3813cde88dc1/volumes" Jan 29 08:22:24 crc kubenswrapper[4861]: I0129 08:22:24.340694 4861 generic.go:334] "Generic (PLEG): container finished" podID="0494d495-b64c-456c-8424-e929353eea07" containerID="bd8214798b6cdf11c02b50cf890db384d6171076e96e2581732fd4159edd8166" exitCode=0 Jan 29 08:22:24 crc kubenswrapper[4861]: I0129 08:22:24.340817 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" event={"ID":"0494d495-b64c-456c-8424-e929353eea07","Type":"ContainerDied","Data":"bd8214798b6cdf11c02b50cf890db384d6171076e96e2581732fd4159edd8166"} Jan 29 08:22:25 crc kubenswrapper[4861]: I0129 08:22:25.982340 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.072771 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-ssh-key-openstack-cell1\") pod \"0494d495-b64c-456c-8424-e929353eea07\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.073256 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-inventory\") pod \"0494d495-b64c-456c-8424-e929353eea07\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.073591 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-pre-adoption-validation-combined-ca-bundle\") pod \"0494d495-b64c-456c-8424-e929353eea07\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.073702 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgx24\" (UniqueName: \"kubernetes.io/projected/0494d495-b64c-456c-8424-e929353eea07-kube-api-access-zgx24\") pod \"0494d495-b64c-456c-8424-e929353eea07\" (UID: \"0494d495-b64c-456c-8424-e929353eea07\") " Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.085277 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0494d495-b64c-456c-8424-e929353eea07-kube-api-access-zgx24" (OuterVolumeSpecName: "kube-api-access-zgx24") pod "0494d495-b64c-456c-8424-e929353eea07" (UID: "0494d495-b64c-456c-8424-e929353eea07"). InnerVolumeSpecName "kube-api-access-zgx24". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.085401 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "0494d495-b64c-456c-8424-e929353eea07" (UID: "0494d495-b64c-456c-8424-e929353eea07"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.110960 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "0494d495-b64c-456c-8424-e929353eea07" (UID: "0494d495-b64c-456c-8424-e929353eea07"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.111742 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-inventory" (OuterVolumeSpecName: "inventory") pod "0494d495-b64c-456c-8424-e929353eea07" (UID: "0494d495-b64c-456c-8424-e929353eea07"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.176459 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.176488 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.176499 4861 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0494d495-b64c-456c-8424-e929353eea07-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.176509 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgx24\" (UniqueName: \"kubernetes.io/projected/0494d495-b64c-456c-8424-e929353eea07-kube-api-access-zgx24\") on node \"crc\" DevicePath \"\"" Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.361105 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" event={"ID":"0494d495-b64c-456c-8424-e929353eea07","Type":"ContainerDied","Data":"201c44c3a4705c4b4059869361323bbaf2283369f926d21dd660bdc010c813f7"} Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.361416 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="201c44c3a4705c4b4059869361323bbaf2283369f926d21dd660bdc010c813f7" Jan 29 08:22:26 crc kubenswrapper[4861]: I0129 08:22:26.361145 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw" Jan 29 08:22:27 crc kubenswrapper[4861]: I0129 08:22:27.117533 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:22:27 crc kubenswrapper[4861]: E0129 08:22:27.118028 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:22:30 crc kubenswrapper[4861]: I0129 08:22:30.449625 4861 scope.go:117] "RemoveContainer" containerID="191a977e818785883cdf0b687bec9c15efe4f2173dbbc09289de8ddfbb11217b" Jan 29 08:22:30 crc kubenswrapper[4861]: I0129 08:22:30.509453 4861 scope.go:117] "RemoveContainer" containerID="5042cc160120c75223f991c846e077bc26235ddabc9dcb0dc0703f905f43e812" Jan 29 08:22:30 crc kubenswrapper[4861]: I0129 08:22:30.584111 4861 scope.go:117] "RemoveContainer" containerID="46f146531c9b0cbf14b74e6b0246566222a22b5bf963da1647bfb9ea8d893717" Jan 29 08:22:30 crc kubenswrapper[4861]: I0129 08:22:30.646620 4861 scope.go:117] "RemoveContainer" containerID="babc54de6768e8244f831a4a6e7b4dfca2609331577eb241df28ee9f6b189630" Jan 29 08:22:30 crc kubenswrapper[4861]: I0129 08:22:30.706911 4861 scope.go:117] "RemoveContainer" containerID="1fc84f198244b259b9881d2e1530e45d52061882eee70cbbbb97cc8c1cf2b0f1" Jan 29 08:22:30 crc kubenswrapper[4861]: I0129 08:22:30.754145 4861 scope.go:117] "RemoveContainer" containerID="333eeff7b98cb94e24451dea310c2f00abe649660fc1dec3488d8a4609c707fb" Jan 29 08:22:30 crc kubenswrapper[4861]: I0129 08:22:30.796791 4861 scope.go:117] "RemoveContainer" containerID="81ae4515af3ae7320f1b837589cc302de121bfbaa7e8e98d7230937f3815e406" Jan 29 08:22:30 crc kubenswrapper[4861]: I0129 08:22:30.834632 4861 scope.go:117] "RemoveContainer" containerID="9d1bd926bcc1bdeaf0acde340562efdec85cdf17ac5e4dc3ff65ce8c6d45483b" Jan 29 08:22:30 crc kubenswrapper[4861]: I0129 08:22:30.867629 4861 scope.go:117] "RemoveContainer" containerID="87f702658d1be2b04d8d96489bb5b91b28267a05491dd9d5d89f945726cb9bee" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.567027 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv"] Jan 29 08:22:32 crc kubenswrapper[4861]: E0129 08:22:32.568017 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0494d495-b64c-456c-8424-e929353eea07" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.568033 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0494d495-b64c-456c-8424-e929353eea07" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.568312 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0494d495-b64c-456c-8424-e929353eea07" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.569150 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.571917 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.572491 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.573170 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.574695 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.580903 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv"] Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.728582 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.728635 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82vr7\" (UniqueName: \"kubernetes.io/projected/7d1929c2-3989-426c-9af1-4c54abe0ab7e-kube-api-access-82vr7\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.729154 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-ssh-key-openstack-cell1\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.729477 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.831063 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-ssh-key-openstack-cell1\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.831166 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.831225 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.831245 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82vr7\" (UniqueName: \"kubernetes.io/projected/7d1929c2-3989-426c-9af1-4c54abe0ab7e-kube-api-access-82vr7\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.840218 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.843719 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.853201 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82vr7\" (UniqueName: \"kubernetes.io/projected/7d1929c2-3989-426c-9af1-4c54abe0ab7e-kube-api-access-82vr7\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.858269 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-ssh-key-openstack-cell1\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:32 crc kubenswrapper[4861]: I0129 08:22:32.905823 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:22:33 crc kubenswrapper[4861]: I0129 08:22:33.515682 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv"] Jan 29 08:22:34 crc kubenswrapper[4861]: I0129 08:22:34.462280 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" event={"ID":"7d1929c2-3989-426c-9af1-4c54abe0ab7e","Type":"ContainerStarted","Data":"b49c9c76fec9980e16f1e5b924bd2130cfaa03d7d7cc9f6b302336032389cf48"} Jan 29 08:22:34 crc kubenswrapper[4861]: I0129 08:22:34.462774 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" event={"ID":"7d1929c2-3989-426c-9af1-4c54abe0ab7e","Type":"ContainerStarted","Data":"61db711ac80f019b07cee3ffec16fecde9e467647c46b3cf377cac79038d517e"} Jan 29 08:22:34 crc kubenswrapper[4861]: I0129 08:22:34.492664 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" podStartSLOduration=2.031405217 podStartE2EDuration="2.492629192s" podCreationTimestamp="2026-01-29 08:22:32 +0000 UTC" firstStartedPulling="2026-01-29 08:22:33.521962974 +0000 UTC m=+6445.193457551" lastFinishedPulling="2026-01-29 08:22:33.983186939 +0000 UTC m=+6445.654681526" observedRunningTime="2026-01-29 08:22:34.484494768 +0000 UTC m=+6446.155989325" watchObservedRunningTime="2026-01-29 08:22:34.492629192 +0000 UTC m=+6446.164123779" Jan 29 08:22:38 crc kubenswrapper[4861]: I0129 08:22:38.042621 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-xd7m7"] Jan 29 08:22:38 crc kubenswrapper[4861]: I0129 08:22:38.054710 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-xd7m7"] Jan 29 08:22:39 crc kubenswrapper[4861]: I0129 08:22:39.139684 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd5cd313-b722-4d0c-a9e7-2c1122c963ab" path="/var/lib/kubelet/pods/bd5cd313-b722-4d0c-a9e7-2c1122c963ab/volumes" Jan 29 08:22:40 crc kubenswrapper[4861]: I0129 08:22:40.117345 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:22:40 crc kubenswrapper[4861]: E0129 08:22:40.118066 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:22:55 crc kubenswrapper[4861]: I0129 08:22:55.127681 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:22:55 crc kubenswrapper[4861]: E0129 08:22:55.129041 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:23:07 
crc kubenswrapper[4861]: I0129 08:23:07.116169 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:23:07 crc kubenswrapper[4861]: E0129 08:23:07.116941 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:23:18 crc kubenswrapper[4861]: I0129 08:23:18.116539 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:23:18 crc kubenswrapper[4861]: E0129 08:23:18.117810 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:23:30 crc kubenswrapper[4861]: I0129 08:23:30.116629 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:23:30 crc kubenswrapper[4861]: E0129 08:23:30.117478 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:23:31 crc kubenswrapper[4861]: I0129 08:23:31.171972 4861 scope.go:117] "RemoveContainer" containerID="5f0a41d5cd8db86bd32d2498b8dff05ee125f59df04602f72d0d362bd6da646f" Jan 29 08:23:31 crc kubenswrapper[4861]: I0129 08:23:31.214047 4861 scope.go:117] "RemoveContainer" containerID="4986a34b5afca44e37725ce5594d8ad3779a0ef9e65be9a33d47ba1449c0bce7" Jan 29 08:23:31 crc kubenswrapper[4861]: I0129 08:23:31.270294 4861 scope.go:117] "RemoveContainer" containerID="c40ed166c4cfef1a3d3565d9714911564ef62d0b27f702ba4c4e48738d9257c0" Jan 29 08:23:31 crc kubenswrapper[4861]: I0129 08:23:31.315210 4861 scope.go:117] "RemoveContainer" containerID="45a9cde1b6b107e4b902f53854d02c94608ec362df26d428dd61902d58efe914" Jan 29 08:23:44 crc kubenswrapper[4861]: I0129 08:23:44.117146 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:23:44 crc kubenswrapper[4861]: E0129 08:23:44.118019 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.500758 4861 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-marketplace-84bsj"] Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.507827 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.520408 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-84bsj"] Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.590687 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-utilities\") pod \"redhat-marketplace-84bsj\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") " pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.590806 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-catalog-content\") pod \"redhat-marketplace-84bsj\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") " pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.590994 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztctd\" (UniqueName: \"kubernetes.io/projected/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-kube-api-access-ztctd\") pod \"redhat-marketplace-84bsj\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") " pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.693183 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-catalog-content\") pod \"redhat-marketplace-84bsj\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") " pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.693314 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztctd\" (UniqueName: \"kubernetes.io/projected/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-kube-api-access-ztctd\") pod \"redhat-marketplace-84bsj\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") " pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.693386 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-utilities\") pod \"redhat-marketplace-84bsj\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") " pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.693824 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-utilities\") pod \"redhat-marketplace-84bsj\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") " pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.693825 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-catalog-content\") pod \"redhat-marketplace-84bsj\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") 
" pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.714178 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztctd\" (UniqueName: \"kubernetes.io/projected/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-kube-api-access-ztctd\") pod \"redhat-marketplace-84bsj\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") " pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:23:53 crc kubenswrapper[4861]: I0129 08:23:53.851163 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:23:54 crc kubenswrapper[4861]: I0129 08:23:54.336583 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-84bsj"] Jan 29 08:23:55 crc kubenswrapper[4861]: I0129 08:23:55.318321 4861 generic.go:334] "Generic (PLEG): container finished" podID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" containerID="4484cc6d2c5621cab45feedc4407cc64cec5c796c4f1b2f3a45b6a4afed23264" exitCode=0 Jan 29 08:23:55 crc kubenswrapper[4861]: I0129 08:23:55.318502 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-84bsj" event={"ID":"b0d39c0f-2030-426c-85a8-c7e4b2d54dff","Type":"ContainerDied","Data":"4484cc6d2c5621cab45feedc4407cc64cec5c796c4f1b2f3a45b6a4afed23264"} Jan 29 08:23:55 crc kubenswrapper[4861]: I0129 08:23:55.318935 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-84bsj" event={"ID":"b0d39c0f-2030-426c-85a8-c7e4b2d54dff","Type":"ContainerStarted","Data":"ddf24af89a08fdf86cc7ce917b6bc3e0f0f8a5761cc8a51d57d57976b42eac8b"} Jan 29 08:23:55 crc kubenswrapper[4861]: I0129 08:23:55.322371 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 08:23:56 crc kubenswrapper[4861]: I0129 08:23:56.117145 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:23:56 crc kubenswrapper[4861]: E0129 08:23:56.118020 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:23:57 crc kubenswrapper[4861]: I0129 08:23:57.348490 4861 generic.go:334] "Generic (PLEG): container finished" podID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" containerID="32b13b143e51094cb07ed0b560de6ac2a144ccc2380918d44889fbe28e1c8c4f" exitCode=0 Jan 29 08:23:57 crc kubenswrapper[4861]: I0129 08:23:57.348596 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-84bsj" event={"ID":"b0d39c0f-2030-426c-85a8-c7e4b2d54dff","Type":"ContainerDied","Data":"32b13b143e51094cb07ed0b560de6ac2a144ccc2380918d44889fbe28e1c8c4f"} Jan 29 08:23:58 crc kubenswrapper[4861]: I0129 08:23:58.359586 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-84bsj" event={"ID":"b0d39c0f-2030-426c-85a8-c7e4b2d54dff","Type":"ContainerStarted","Data":"2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d"} Jan 29 08:23:58 crc kubenswrapper[4861]: I0129 08:23:58.382753 4861 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-84bsj" podStartSLOduration=2.854703363 podStartE2EDuration="5.382732904s" podCreationTimestamp="2026-01-29 08:23:53 +0000 UTC" firstStartedPulling="2026-01-29 08:23:55.322086401 +0000 UTC m=+6526.993580958" lastFinishedPulling="2026-01-29 08:23:57.850115942 +0000 UTC m=+6529.521610499" observedRunningTime="2026-01-29 08:23:58.376508881 +0000 UTC m=+6530.048003438" watchObservedRunningTime="2026-01-29 08:23:58.382732904 +0000 UTC m=+6530.054227471" Jan 29 08:24:03 crc kubenswrapper[4861]: I0129 08:24:03.852199 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:24:03 crc kubenswrapper[4861]: I0129 08:24:03.853003 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:24:03 crc kubenswrapper[4861]: I0129 08:24:03.902738 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:24:04 crc kubenswrapper[4861]: I0129 08:24:04.503559 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:24:04 crc kubenswrapper[4861]: I0129 08:24:04.566129 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-84bsj"] Jan 29 08:24:06 crc kubenswrapper[4861]: I0129 08:24:06.442582 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-84bsj" podUID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" containerName="registry-server" containerID="cri-o://2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d" gracePeriod=2 Jan 29 08:24:06 crc kubenswrapper[4861]: I0129 08:24:06.991590 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.092864 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-utilities\") pod \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") " Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.092952 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-catalog-content\") pod \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") " Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.093170 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztctd\" (UniqueName: \"kubernetes.io/projected/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-kube-api-access-ztctd\") pod \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\" (UID: \"b0d39c0f-2030-426c-85a8-c7e4b2d54dff\") " Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.094645 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-utilities" (OuterVolumeSpecName: "utilities") pod "b0d39c0f-2030-426c-85a8-c7e4b2d54dff" (UID: "b0d39c0f-2030-426c-85a8-c7e4b2d54dff"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.101795 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-kube-api-access-ztctd" (OuterVolumeSpecName: "kube-api-access-ztctd") pod "b0d39c0f-2030-426c-85a8-c7e4b2d54dff" (UID: "b0d39c0f-2030-426c-85a8-c7e4b2d54dff"). InnerVolumeSpecName "kube-api-access-ztctd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.112291 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b0d39c0f-2030-426c-85a8-c7e4b2d54dff" (UID: "b0d39c0f-2030-426c-85a8-c7e4b2d54dff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.197696 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztctd\" (UniqueName: \"kubernetes.io/projected/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-kube-api-access-ztctd\") on node \"crc\" DevicePath \"\"" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.197752 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.197772 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0d39c0f-2030-426c-85a8-c7e4b2d54dff-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.458362 4861 generic.go:334] "Generic (PLEG): container finished" podID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" containerID="2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d" exitCode=0 Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.458508 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-84bsj" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.458508 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-84bsj" event={"ID":"b0d39c0f-2030-426c-85a8-c7e4b2d54dff","Type":"ContainerDied","Data":"2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d"} Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.458862 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-84bsj" event={"ID":"b0d39c0f-2030-426c-85a8-c7e4b2d54dff","Type":"ContainerDied","Data":"ddf24af89a08fdf86cc7ce917b6bc3e0f0f8a5761cc8a51d57d57976b42eac8b"} Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.458890 4861 scope.go:117] "RemoveContainer" containerID="2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.487316 4861 scope.go:117] "RemoveContainer" containerID="32b13b143e51094cb07ed0b560de6ac2a144ccc2380918d44889fbe28e1c8c4f" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.489201 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-84bsj"] Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.499020 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-84bsj"] Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.517140 4861 scope.go:117] "RemoveContainer" containerID="4484cc6d2c5621cab45feedc4407cc64cec5c796c4f1b2f3a45b6a4afed23264" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.567485 4861 scope.go:117] "RemoveContainer" containerID="2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d" Jan 29 08:24:07 crc kubenswrapper[4861]: E0129 08:24:07.568005 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d\": container with ID starting with 2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d not found: ID does not exist" containerID="2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.568109 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d"} err="failed to get container status \"2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d\": rpc error: code = NotFound desc = could not find container \"2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d\": container with ID starting with 2d0e1e5be0dad1d88a617afd4b5ddfde8e803d74d5d5147f2a98c1fa959d682d not found: ID does not exist" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.568170 4861 scope.go:117] "RemoveContainer" containerID="32b13b143e51094cb07ed0b560de6ac2a144ccc2380918d44889fbe28e1c8c4f" Jan 29 08:24:07 crc kubenswrapper[4861]: E0129 08:24:07.568502 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32b13b143e51094cb07ed0b560de6ac2a144ccc2380918d44889fbe28e1c8c4f\": container with ID starting with 32b13b143e51094cb07ed0b560de6ac2a144ccc2380918d44889fbe28e1c8c4f not found: ID does not exist" containerID="32b13b143e51094cb07ed0b560de6ac2a144ccc2380918d44889fbe28e1c8c4f" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.568534 4861 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32b13b143e51094cb07ed0b560de6ac2a144ccc2380918d44889fbe28e1c8c4f"} err="failed to get container status \"32b13b143e51094cb07ed0b560de6ac2a144ccc2380918d44889fbe28e1c8c4f\": rpc error: code = NotFound desc = could not find container \"32b13b143e51094cb07ed0b560de6ac2a144ccc2380918d44889fbe28e1c8c4f\": container with ID starting with 32b13b143e51094cb07ed0b560de6ac2a144ccc2380918d44889fbe28e1c8c4f not found: ID does not exist" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.568553 4861 scope.go:117] "RemoveContainer" containerID="4484cc6d2c5621cab45feedc4407cc64cec5c796c4f1b2f3a45b6a4afed23264" Jan 29 08:24:07 crc kubenswrapper[4861]: E0129 08:24:07.568890 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4484cc6d2c5621cab45feedc4407cc64cec5c796c4f1b2f3a45b6a4afed23264\": container with ID starting with 4484cc6d2c5621cab45feedc4407cc64cec5c796c4f1b2f3a45b6a4afed23264 not found: ID does not exist" containerID="4484cc6d2c5621cab45feedc4407cc64cec5c796c4f1b2f3a45b6a4afed23264" Jan 29 08:24:07 crc kubenswrapper[4861]: I0129 08:24:07.568940 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4484cc6d2c5621cab45feedc4407cc64cec5c796c4f1b2f3a45b6a4afed23264"} err="failed to get container status \"4484cc6d2c5621cab45feedc4407cc64cec5c796c4f1b2f3a45b6a4afed23264\": rpc error: code = NotFound desc = could not find container \"4484cc6d2c5621cab45feedc4407cc64cec5c796c4f1b2f3a45b6a4afed23264\": container with ID starting with 4484cc6d2c5621cab45feedc4407cc64cec5c796c4f1b2f3a45b6a4afed23264 not found: ID does not exist" Jan 29 08:24:09 crc kubenswrapper[4861]: I0129 08:24:09.128862 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" path="/var/lib/kubelet/pods/b0d39c0f-2030-426c-85a8-c7e4b2d54dff/volumes" Jan 29 08:24:11 crc kubenswrapper[4861]: I0129 08:24:11.118424 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:24:11 crc kubenswrapper[4861]: E0129 08:24:11.119129 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:24:23 crc kubenswrapper[4861]: I0129 08:24:23.045689 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-create-glm78"] Jan 29 08:24:23 crc kubenswrapper[4861]: I0129 08:24:23.060644 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-create-glm78"] Jan 29 08:24:23 crc kubenswrapper[4861]: I0129 08:24:23.128675 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="399da928-5e53-4b19-8d3c-464215b26f81" path="/var/lib/kubelet/pods/399da928-5e53-4b19-8d3c-464215b26f81/volumes" Jan 29 08:24:24 crc kubenswrapper[4861]: I0129 08:24:24.037501 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-09f1-account-create-update-xxwpk"] Jan 29 08:24:24 crc kubenswrapper[4861]: I0129 08:24:24.046661 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/octavia-09f1-account-create-update-xxwpk"] Jan 29 08:24:24 crc kubenswrapper[4861]: I0129 08:24:24.116910 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:24:24 crc kubenswrapper[4861]: E0129 08:24:24.117197 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:24:25 crc kubenswrapper[4861]: I0129 08:24:25.135853 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f50d96e4-61f0-48b2-af43-0f8401cd2bcb" path="/var/lib/kubelet/pods/f50d96e4-61f0-48b2-af43-0f8401cd2bcb/volumes" Jan 29 08:24:30 crc kubenswrapper[4861]: I0129 08:24:30.039919 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-persistence-db-create-mvwwh"] Jan 29 08:24:30 crc kubenswrapper[4861]: I0129 08:24:30.054629 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-persistence-db-create-mvwwh"] Jan 29 08:24:30 crc kubenswrapper[4861]: I0129 08:24:30.064402 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-047c-account-create-update-z9kr2"] Jan 29 08:24:30 crc kubenswrapper[4861]: I0129 08:24:30.074368 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-047c-account-create-update-z9kr2"] Jan 29 08:24:31 crc kubenswrapper[4861]: I0129 08:24:31.135678 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38e9508e-d706-4b9e-9d40-91535eb221c6" path="/var/lib/kubelet/pods/38e9508e-d706-4b9e-9d40-91535eb221c6/volumes" Jan 29 08:24:31 crc kubenswrapper[4861]: I0129 08:24:31.139108 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecefac8f-faec-489d-9c82-62e0ccd17d16" path="/var/lib/kubelet/pods/ecefac8f-faec-489d-9c82-62e0ccd17d16/volumes" Jan 29 08:24:31 crc kubenswrapper[4861]: I0129 08:24:31.426362 4861 scope.go:117] "RemoveContainer" containerID="42c5b350709293513348f6e0fcb9f1276f4ad764469c9e4d7b1672cfd9c3fe52" Jan 29 08:24:31 crc kubenswrapper[4861]: I0129 08:24:31.455389 4861 scope.go:117] "RemoveContainer" containerID="d17087f7c4bd3135816851700890f8eda3ad7bda848c1b8f20e450722dd796ee" Jan 29 08:24:31 crc kubenswrapper[4861]: I0129 08:24:31.524393 4861 scope.go:117] "RemoveContainer" containerID="d0420f79ccbf297db6d8885e86b532dca6e8f22c5019900c56af46fd31995c71" Jan 29 08:24:31 crc kubenswrapper[4861]: I0129 08:24:31.564938 4861 scope.go:117] "RemoveContainer" containerID="e65db8bedabb7aa4c88e2488663c9d8189453dd52c355a02369b4054a76b68cf" Jan 29 08:24:36 crc kubenswrapper[4861]: I0129 08:24:36.116401 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:24:36 crc kubenswrapper[4861]: E0129 08:24:36.117151 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" 
podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:24:49 crc kubenswrapper[4861]: I0129 08:24:49.125748 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:24:49 crc kubenswrapper[4861]: E0129 08:24:49.126799 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:25:01 crc kubenswrapper[4861]: I0129 08:25:01.117734 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:25:01 crc kubenswrapper[4861]: E0129 08:25:01.118547 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:25:13 crc kubenswrapper[4861]: I0129 08:25:13.042605 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-sync-f2bgs"] Jan 29 08:25:13 crc kubenswrapper[4861]: I0129 08:25:13.055687 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-sync-f2bgs"] Jan 29 08:25:13 crc kubenswrapper[4861]: I0129 08:25:13.116563 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:25:13 crc kubenswrapper[4861]: E0129 08:25:13.116827 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:25:13 crc kubenswrapper[4861]: I0129 08:25:13.131681 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="faf89663-f5be-49c1-8c26-6317e2ebd435" path="/var/lib/kubelet/pods/faf89663-f5be-49c1-8c26-6317e2ebd435/volumes" Jan 29 08:25:24 crc kubenswrapper[4861]: I0129 08:25:24.116373 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:25:24 crc kubenswrapper[4861]: E0129 08:25:24.117171 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:25:31 crc kubenswrapper[4861]: I0129 08:25:31.733526 4861 scope.go:117] "RemoveContainer" containerID="31e37f3853942b4de0e6066f1d115606cbeee90fb6cbb564ae6a2ad5dee7e99d" Jan 29 08:25:31 crc kubenswrapper[4861]: I0129 08:25:31.771092 4861 
scope.go:117] "RemoveContainer" containerID="f27442c9044066c0d00bb83daac2585ec87ae652b0530dc27835bb142a05ed7d" Jan 29 08:25:38 crc kubenswrapper[4861]: I0129 08:25:38.117436 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:25:38 crc kubenswrapper[4861]: E0129 08:25:38.118191 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:25:49 crc kubenswrapper[4861]: I0129 08:25:49.122378 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:25:49 crc kubenswrapper[4861]: E0129 08:25:49.123206 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:26:03 crc kubenswrapper[4861]: I0129 08:26:03.117299 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:26:03 crc kubenswrapper[4861]: E0129 08:26:03.118496 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.856242 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cfmjh"] Jan 29 08:26:09 crc kubenswrapper[4861]: E0129 08:26:09.857431 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" containerName="extract-utilities" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.857446 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" containerName="extract-utilities" Jan 29 08:26:09 crc kubenswrapper[4861]: E0129 08:26:09.857462 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" containerName="registry-server" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.857470 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" containerName="registry-server" Jan 29 08:26:09 crc kubenswrapper[4861]: E0129 08:26:09.857485 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" containerName="extract-content" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.857494 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" containerName="extract-content" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.857756 
4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0d39c0f-2030-426c-85a8-c7e4b2d54dff" containerName="registry-server" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.859680 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.869153 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-utilities\") pod \"community-operators-cfmjh\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.869371 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgd9l\" (UniqueName: \"kubernetes.io/projected/18f02edb-dcaf-4c8c-9700-56065b70cb9f-kube-api-access-tgd9l\") pod \"community-operators-cfmjh\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.869437 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-catalog-content\") pod \"community-operators-cfmjh\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.891160 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cfmjh"] Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.971705 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-utilities\") pod \"community-operators-cfmjh\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.972061 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgd9l\" (UniqueName: \"kubernetes.io/projected/18f02edb-dcaf-4c8c-9700-56065b70cb9f-kube-api-access-tgd9l\") pod \"community-operators-cfmjh\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.972828 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-catalog-content\") pod \"community-operators-cfmjh\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.972279 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-utilities\") pod \"community-operators-cfmjh\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.973237 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-catalog-content\") pod \"community-operators-cfmjh\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:09 crc kubenswrapper[4861]: I0129 08:26:09.995226 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgd9l\" (UniqueName: \"kubernetes.io/projected/18f02edb-dcaf-4c8c-9700-56065b70cb9f-kube-api-access-tgd9l\") pod \"community-operators-cfmjh\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:10 crc kubenswrapper[4861]: I0129 08:26:10.209031 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:10 crc kubenswrapper[4861]: I0129 08:26:10.754341 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cfmjh"] Jan 29 08:26:10 crc kubenswrapper[4861]: I0129 08:26:10.767165 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmjh" event={"ID":"18f02edb-dcaf-4c8c-9700-56065b70cb9f","Type":"ContainerStarted","Data":"7d0f5fa5385f15947905756c7a2a1078d303ccb5d2e2b3e190dbb141bd61c2ff"} Jan 29 08:26:11 crc kubenswrapper[4861]: I0129 08:26:11.781815 4861 generic.go:334] "Generic (PLEG): container finished" podID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" containerID="a380e15492533342f98b3486138dad351cb08d0bcfe29ded3129cbb813c6316e" exitCode=0 Jan 29 08:26:11 crc kubenswrapper[4861]: I0129 08:26:11.781907 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmjh" event={"ID":"18f02edb-dcaf-4c8c-9700-56065b70cb9f","Type":"ContainerDied","Data":"a380e15492533342f98b3486138dad351cb08d0bcfe29ded3129cbb813c6316e"} Jan 29 08:26:13 crc kubenswrapper[4861]: I0129 08:26:13.813544 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmjh" event={"ID":"18f02edb-dcaf-4c8c-9700-56065b70cb9f","Type":"ContainerStarted","Data":"f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a"} Jan 29 08:26:16 crc kubenswrapper[4861]: I0129 08:26:16.117787 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:26:16 crc kubenswrapper[4861]: E0129 08:26:16.118688 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:26:16 crc kubenswrapper[4861]: I0129 08:26:16.844382 4861 generic.go:334] "Generic (PLEG): container finished" podID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" containerID="f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a" exitCode=0 Jan 29 08:26:16 crc kubenswrapper[4861]: I0129 08:26:16.844489 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmjh" event={"ID":"18f02edb-dcaf-4c8c-9700-56065b70cb9f","Type":"ContainerDied","Data":"f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a"} Jan 29 08:26:18 crc kubenswrapper[4861]: I0129 08:26:18.869440 4861 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmjh" event={"ID":"18f02edb-dcaf-4c8c-9700-56065b70cb9f","Type":"ContainerStarted","Data":"72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb"} Jan 29 08:26:18 crc kubenswrapper[4861]: I0129 08:26:18.896583 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cfmjh" podStartSLOduration=3.9356970479999998 podStartE2EDuration="9.896565224s" podCreationTimestamp="2026-01-29 08:26:09 +0000 UTC" firstStartedPulling="2026-01-29 08:26:11.783886326 +0000 UTC m=+6663.455380883" lastFinishedPulling="2026-01-29 08:26:17.744754462 +0000 UTC m=+6669.416249059" observedRunningTime="2026-01-29 08:26:18.892455576 +0000 UTC m=+6670.563950153" watchObservedRunningTime="2026-01-29 08:26:18.896565224 +0000 UTC m=+6670.568059781" Jan 29 08:26:20 crc kubenswrapper[4861]: I0129 08:26:20.210236 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:20 crc kubenswrapper[4861]: I0129 08:26:20.210601 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:20 crc kubenswrapper[4861]: I0129 08:26:20.268540 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:29 crc kubenswrapper[4861]: I0129 08:26:29.124577 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:26:29 crc kubenswrapper[4861]: E0129 08:26:29.127238 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:26:30 crc kubenswrapper[4861]: I0129 08:26:30.317440 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:30 crc kubenswrapper[4861]: I0129 08:26:30.386768 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cfmjh"] Jan 29 08:26:31 crc kubenswrapper[4861]: I0129 08:26:31.005366 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cfmjh" podUID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" containerName="registry-server" containerID="cri-o://72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb" gracePeriod=2 Jan 29 08:26:31 crc kubenswrapper[4861]: I0129 08:26:31.575380 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:31 crc kubenswrapper[4861]: I0129 08:26:31.704711 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-utilities\") pod \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " Jan 29 08:26:31 crc kubenswrapper[4861]: I0129 08:26:31.705030 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-catalog-content\") pod \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " Jan 29 08:26:31 crc kubenswrapper[4861]: I0129 08:26:31.705104 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgd9l\" (UniqueName: \"kubernetes.io/projected/18f02edb-dcaf-4c8c-9700-56065b70cb9f-kube-api-access-tgd9l\") pod \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\" (UID: \"18f02edb-dcaf-4c8c-9700-56065b70cb9f\") " Jan 29 08:26:31 crc kubenswrapper[4861]: I0129 08:26:31.705944 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-utilities" (OuterVolumeSpecName: "utilities") pod "18f02edb-dcaf-4c8c-9700-56065b70cb9f" (UID: "18f02edb-dcaf-4c8c-9700-56065b70cb9f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:26:31 crc kubenswrapper[4861]: I0129 08:26:31.707682 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:26:31 crc kubenswrapper[4861]: I0129 08:26:31.712155 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18f02edb-dcaf-4c8c-9700-56065b70cb9f-kube-api-access-tgd9l" (OuterVolumeSpecName: "kube-api-access-tgd9l") pod "18f02edb-dcaf-4c8c-9700-56065b70cb9f" (UID: "18f02edb-dcaf-4c8c-9700-56065b70cb9f"). InnerVolumeSpecName "kube-api-access-tgd9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:26:31 crc kubenswrapper[4861]: I0129 08:26:31.764647 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "18f02edb-dcaf-4c8c-9700-56065b70cb9f" (UID: "18f02edb-dcaf-4c8c-9700-56065b70cb9f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:26:31 crc kubenswrapper[4861]: I0129 08:26:31.810388 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/18f02edb-dcaf-4c8c-9700-56065b70cb9f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:26:31 crc kubenswrapper[4861]: I0129 08:26:31.810422 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgd9l\" (UniqueName: \"kubernetes.io/projected/18f02edb-dcaf-4c8c-9700-56065b70cb9f-kube-api-access-tgd9l\") on node \"crc\" DevicePath \"\"" Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.020099 4861 generic.go:334] "Generic (PLEG): container finished" podID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" containerID="72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb" exitCode=0 Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.020148 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmjh" event={"ID":"18f02edb-dcaf-4c8c-9700-56065b70cb9f","Type":"ContainerDied","Data":"72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb"} Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.020171 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cfmjh" Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.020190 4861 scope.go:117] "RemoveContainer" containerID="72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb" Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.020178 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmjh" event={"ID":"18f02edb-dcaf-4c8c-9700-56065b70cb9f","Type":"ContainerDied","Data":"7d0f5fa5385f15947905756c7a2a1078d303ccb5d2e2b3e190dbb141bd61c2ff"} Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.043910 4861 scope.go:117] "RemoveContainer" containerID="f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a" Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.072044 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cfmjh"] Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.082261 4861 scope.go:117] "RemoveContainer" containerID="a380e15492533342f98b3486138dad351cb08d0bcfe29ded3129cbb813c6316e" Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.087724 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cfmjh"] Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.132364 4861 scope.go:117] "RemoveContainer" containerID="72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb" Jan 29 08:26:32 crc kubenswrapper[4861]: E0129 08:26:32.133345 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb\": container with ID starting with 72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb not found: ID does not exist" containerID="72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb" Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.133397 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb"} err="failed to get container status 
\"72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb\": rpc error: code = NotFound desc = could not find container \"72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb\": container with ID starting with 72761673992c3f3cfff50c0483400224e06c69a1d3cdb631ccf8006a7643dccb not found: ID does not exist" Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.133428 4861 scope.go:117] "RemoveContainer" containerID="f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a" Jan 29 08:26:32 crc kubenswrapper[4861]: E0129 08:26:32.138210 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a\": container with ID starting with f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a not found: ID does not exist" containerID="f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a" Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.138245 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a"} err="failed to get container status \"f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a\": rpc error: code = NotFound desc = could not find container \"f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a\": container with ID starting with f5f5861fc474d6eed31b74a9b4491324d0ffb3e26441afd0776671884b0a4d2a not found: ID does not exist" Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.138284 4861 scope.go:117] "RemoveContainer" containerID="a380e15492533342f98b3486138dad351cb08d0bcfe29ded3129cbb813c6316e" Jan 29 08:26:32 crc kubenswrapper[4861]: E0129 08:26:32.138724 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a380e15492533342f98b3486138dad351cb08d0bcfe29ded3129cbb813c6316e\": container with ID starting with a380e15492533342f98b3486138dad351cb08d0bcfe29ded3129cbb813c6316e not found: ID does not exist" containerID="a380e15492533342f98b3486138dad351cb08d0bcfe29ded3129cbb813c6316e" Jan 29 08:26:32 crc kubenswrapper[4861]: I0129 08:26:32.138746 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a380e15492533342f98b3486138dad351cb08d0bcfe29ded3129cbb813c6316e"} err="failed to get container status \"a380e15492533342f98b3486138dad351cb08d0bcfe29ded3129cbb813c6316e\": rpc error: code = NotFound desc = could not find container \"a380e15492533342f98b3486138dad351cb08d0bcfe29ded3129cbb813c6316e\": container with ID starting with a380e15492533342f98b3486138dad351cb08d0bcfe29ded3129cbb813c6316e not found: ID does not exist" Jan 29 08:26:33 crc kubenswrapper[4861]: I0129 08:26:33.127191 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" path="/var/lib/kubelet/pods/18f02edb-dcaf-4c8c-9700-56065b70cb9f/volumes" Jan 29 08:26:44 crc kubenswrapper[4861]: I0129 08:26:44.116758 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:26:45 crc kubenswrapper[4861]: I0129 08:26:45.155804 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" 
event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"30135386b1ff7ecf0c082bc48e918189cfa4474d420f76eda1cd94f3d751fc19"} Jan 29 08:27:57 crc kubenswrapper[4861]: I0129 08:27:57.070176 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-7ed2-account-create-update-98kvj"] Jan 29 08:27:57 crc kubenswrapper[4861]: I0129 08:27:57.091197 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-9sql6"] Jan 29 08:27:57 crc kubenswrapper[4861]: I0129 08:27:57.129420 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-9sql6"] Jan 29 08:27:57 crc kubenswrapper[4861]: I0129 08:27:57.129456 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-7ed2-account-create-update-98kvj"] Jan 29 08:27:59 crc kubenswrapper[4861]: I0129 08:27:59.139169 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2825b4e3-4159-492d-87f0-90fc88b8c345" path="/var/lib/kubelet/pods/2825b4e3-4159-492d-87f0-90fc88b8c345/volumes" Jan 29 08:27:59 crc kubenswrapper[4861]: I0129 08:27:59.141593 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fcbbac74-0f1c-4d78-a3be-7a6c25933bd2" path="/var/lib/kubelet/pods/fcbbac74-0f1c-4d78-a3be-7a6c25933bd2/volumes" Jan 29 08:28:11 crc kubenswrapper[4861]: I0129 08:28:11.066132 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-lmzn7"] Jan 29 08:28:11 crc kubenswrapper[4861]: I0129 08:28:11.078697 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-lmzn7"] Jan 29 08:28:11 crc kubenswrapper[4861]: I0129 08:28:11.128551 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40129c18-e920-4ead-a7d2-27d1c2c442ab" path="/var/lib/kubelet/pods/40129c18-e920-4ead-a7d2-27d1c2c442ab/volumes" Jan 29 08:28:31 crc kubenswrapper[4861]: I0129 08:28:31.941206 4861 scope.go:117] "RemoveContainer" containerID="32586d795f9a74c4b8d577a6391b61598e554c87d4c33ddc7ab5325084a00786" Jan 29 08:28:31 crc kubenswrapper[4861]: I0129 08:28:31.971063 4861 scope.go:117] "RemoveContainer" containerID="741f43c77976cd1aeda1d3c9c60155410d344c4260908316596da8caf7e66462" Jan 29 08:28:32 crc kubenswrapper[4861]: I0129 08:28:32.043500 4861 scope.go:117] "RemoveContainer" containerID="3c71dc347553eb11e9289703482de50a5c3af907fd475bace6095a34dc7fac58" Jan 29 08:29:00 crc kubenswrapper[4861]: I0129 08:29:00.629592 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:29:00 crc kubenswrapper[4861]: I0129 08:29:00.630404 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.274124 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lw8sp"] Jan 29 08:29:13 crc kubenswrapper[4861]: E0129 08:29:13.275202 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" containerName="extract-utilities" Jan 29 08:29:13 crc 
kubenswrapper[4861]: I0129 08:29:13.275218 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" containerName="extract-utilities" Jan 29 08:29:13 crc kubenswrapper[4861]: E0129 08:29:13.275232 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" containerName="registry-server" Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.275240 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" containerName="registry-server" Jan 29 08:29:13 crc kubenswrapper[4861]: E0129 08:29:13.275286 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" containerName="extract-content" Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.275295 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" containerName="extract-content" Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.275548 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="18f02edb-dcaf-4c8c-9700-56065b70cb9f" containerName="registry-server" Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.277484 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lw8sp" Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.297135 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lw8sp"] Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.303119 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phg2r\" (UniqueName: \"kubernetes.io/projected/89f344e5-be53-4158-8416-e0a62b697bc0-kube-api-access-phg2r\") pod \"redhat-operators-lw8sp\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " pod="openshift-marketplace/redhat-operators-lw8sp" Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.303673 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-utilities\") pod \"redhat-operators-lw8sp\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " pod="openshift-marketplace/redhat-operators-lw8sp" Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.303870 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-catalog-content\") pod \"redhat-operators-lw8sp\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " pod="openshift-marketplace/redhat-operators-lw8sp" Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.405795 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-catalog-content\") pod \"redhat-operators-lw8sp\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " pod="openshift-marketplace/redhat-operators-lw8sp" Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.405959 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phg2r\" (UniqueName: \"kubernetes.io/projected/89f344e5-be53-4158-8416-e0a62b697bc0-kube-api-access-phg2r\") pod \"redhat-operators-lw8sp\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " pod="openshift-marketplace/redhat-operators-lw8sp" 
Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.406146 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-utilities\") pod \"redhat-operators-lw8sp\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " pod="openshift-marketplace/redhat-operators-lw8sp"
Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.406448 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-catalog-content\") pod \"redhat-operators-lw8sp\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " pod="openshift-marketplace/redhat-operators-lw8sp"
Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.406682 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-utilities\") pod \"redhat-operators-lw8sp\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " pod="openshift-marketplace/redhat-operators-lw8sp"
Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.429786 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phg2r\" (UniqueName: \"kubernetes.io/projected/89f344e5-be53-4158-8416-e0a62b697bc0-kube-api-access-phg2r\") pod \"redhat-operators-lw8sp\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " pod="openshift-marketplace/redhat-operators-lw8sp"
Jan 29 08:29:13 crc kubenswrapper[4861]: I0129 08:29:13.625352 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lw8sp"
Jan 29 08:29:14 crc kubenswrapper[4861]: I0129 08:29:14.138645 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lw8sp"]
Jan 29 08:29:14 crc kubenswrapper[4861]: I0129 08:29:14.828741 4861 generic.go:334] "Generic (PLEG): container finished" podID="89f344e5-be53-4158-8416-e0a62b697bc0" containerID="ce1a41fe8f2177c909e6886f615633e2f7a698c27a1494792b00dfc8cb5f085e" exitCode=0
Jan 29 08:29:14 crc kubenswrapper[4861]: I0129 08:29:14.828826 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw8sp" event={"ID":"89f344e5-be53-4158-8416-e0a62b697bc0","Type":"ContainerDied","Data":"ce1a41fe8f2177c909e6886f615633e2f7a698c27a1494792b00dfc8cb5f085e"}
Jan 29 08:29:14 crc kubenswrapper[4861]: I0129 08:29:14.829043 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw8sp" event={"ID":"89f344e5-be53-4158-8416-e0a62b697bc0","Type":"ContainerStarted","Data":"afea3fc2b60de8ccdf599a9530b786a51c2380ff6e60740609432cf8c1cb3f1b"}
Jan 29 08:29:14 crc kubenswrapper[4861]: I0129 08:29:14.830889 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.670402 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4vm9n"]
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.672770 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.681776 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4vm9n"]
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.753816 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-utilities\") pod \"certified-operators-4vm9n\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") " pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.754209 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5jvt\" (UniqueName: \"kubernetes.io/projected/1a8e9a9e-955b-401d-80eb-e8556e3a5269-kube-api-access-g5jvt\") pod \"certified-operators-4vm9n\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") " pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.754425 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-catalog-content\") pod \"certified-operators-4vm9n\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") " pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.838861 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw8sp" event={"ID":"89f344e5-be53-4158-8416-e0a62b697bc0","Type":"ContainerStarted","Data":"9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245"}
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.857216 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-utilities\") pod \"certified-operators-4vm9n\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") " pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.857368 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5jvt\" (UniqueName: \"kubernetes.io/projected/1a8e9a9e-955b-401d-80eb-e8556e3a5269-kube-api-access-g5jvt\") pod \"certified-operators-4vm9n\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") " pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.857449 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-catalog-content\") pod \"certified-operators-4vm9n\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") " pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.857856 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-utilities\") pod \"certified-operators-4vm9n\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") " pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.857944 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-catalog-content\") pod \"certified-operators-4vm9n\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") " pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.878111 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5jvt\" (UniqueName: \"kubernetes.io/projected/1a8e9a9e-955b-401d-80eb-e8556e3a5269-kube-api-access-g5jvt\") pod \"certified-operators-4vm9n\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") " pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:15 crc kubenswrapper[4861]: I0129 08:29:15.996427 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:16 crc kubenswrapper[4861]: I0129 08:29:16.528763 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4vm9n"]
Jan 29 08:29:16 crc kubenswrapper[4861]: W0129 08:29:16.530766 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a8e9a9e_955b_401d_80eb_e8556e3a5269.slice/crio-f1b46ace197e914bffa03911258897c2bc6236da9f7dc7fe25f9955fba479781 WatchSource:0}: Error finding container f1b46ace197e914bffa03911258897c2bc6236da9f7dc7fe25f9955fba479781: Status 404 returned error can't find the container with id f1b46ace197e914bffa03911258897c2bc6236da9f7dc7fe25f9955fba479781
Jan 29 08:29:16 crc kubenswrapper[4861]: I0129 08:29:16.852596 4861 generic.go:334] "Generic (PLEG): container finished" podID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerID="2a44c13fe589e337fb0c8d3008c277723f623753a130adc2372134b2add319ce" exitCode=0
Jan 29 08:29:16 crc kubenswrapper[4861]: I0129 08:29:16.852701 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vm9n" event={"ID":"1a8e9a9e-955b-401d-80eb-e8556e3a5269","Type":"ContainerDied","Data":"2a44c13fe589e337fb0c8d3008c277723f623753a130adc2372134b2add319ce"}
Jan 29 08:29:16 crc kubenswrapper[4861]: I0129 08:29:16.852809 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vm9n" event={"ID":"1a8e9a9e-955b-401d-80eb-e8556e3a5269","Type":"ContainerStarted","Data":"f1b46ace197e914bffa03911258897c2bc6236da9f7dc7fe25f9955fba479781"}
Jan 29 08:29:18 crc kubenswrapper[4861]: I0129 08:29:18.872994 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vm9n" event={"ID":"1a8e9a9e-955b-401d-80eb-e8556e3a5269","Type":"ContainerStarted","Data":"84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45"}
Jan 29 08:29:21 crc kubenswrapper[4861]: I0129 08:29:21.904933 4861 generic.go:334] "Generic (PLEG): container finished" podID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerID="84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45" exitCode=0
Jan 29 08:29:21 crc kubenswrapper[4861]: I0129 08:29:21.905024 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vm9n" event={"ID":"1a8e9a9e-955b-401d-80eb-e8556e3a5269","Type":"ContainerDied","Data":"84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45"}
Jan 29 08:29:23 crc kubenswrapper[4861]: I0129 08:29:23.931586 4861 generic.go:334] "Generic (PLEG): container finished" podID="89f344e5-be53-4158-8416-e0a62b697bc0" containerID="9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245" exitCode=0
Jan 29 08:29:23 crc kubenswrapper[4861]: I0129 08:29:23.931661 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw8sp" event={"ID":"89f344e5-be53-4158-8416-e0a62b697bc0","Type":"ContainerDied","Data":"9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245"}
Jan 29 08:29:23 crc kubenswrapper[4861]: I0129 08:29:23.936603 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vm9n" event={"ID":"1a8e9a9e-955b-401d-80eb-e8556e3a5269","Type":"ContainerStarted","Data":"b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5"}
Jan 29 08:29:23 crc kubenswrapper[4861]: I0129 08:29:23.994799 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4vm9n" podStartSLOduration=3.265253031 podStartE2EDuration="8.994771346s" podCreationTimestamp="2026-01-29 08:29:15 +0000 UTC" firstStartedPulling="2026-01-29 08:29:16.855735882 +0000 UTC m=+6848.527230439" lastFinishedPulling="2026-01-29 08:29:22.585254187 +0000 UTC m=+6854.256748754" observedRunningTime="2026-01-29 08:29:23.983467868 +0000 UTC m=+6855.654962435" watchObservedRunningTime="2026-01-29 08:29:23.994771346 +0000 UTC m=+6855.666265903"
Jan 29 08:29:24 crc kubenswrapper[4861]: I0129 08:29:24.946414 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw8sp" event={"ID":"89f344e5-be53-4158-8416-e0a62b697bc0","Type":"ContainerStarted","Data":"d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7"}
Jan 29 08:29:24 crc kubenswrapper[4861]: I0129 08:29:24.963995 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lw8sp" podStartSLOduration=2.448180393 podStartE2EDuration="11.963980379s" podCreationTimestamp="2026-01-29 08:29:13 +0000 UTC" firstStartedPulling="2026-01-29 08:29:14.830647451 +0000 UTC m=+6846.502142008" lastFinishedPulling="2026-01-29 08:29:24.346447437 +0000 UTC m=+6856.017941994" observedRunningTime="2026-01-29 08:29:24.963310702 +0000 UTC m=+6856.634805269" watchObservedRunningTime="2026-01-29 08:29:24.963980379 +0000 UTC m=+6856.635474936"
Jan 29 08:29:25 crc kubenswrapper[4861]: I0129 08:29:25.997526 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:25 crc kubenswrapper[4861]: I0129 08:29:25.997597 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:27 crc kubenswrapper[4861]: I0129 08:29:27.055809 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-4vm9n" podUID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerName="registry-server" probeResult="failure" output=<
Jan 29 08:29:27 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s
Jan 29 08:29:27 crc kubenswrapper[4861]: >
Jan 29 08:29:30 crc kubenswrapper[4861]: I0129 08:29:30.629794 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 08:29:30 crc kubenswrapper[4861]: I0129 08:29:30.630394 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 08:29:33 crc kubenswrapper[4861]: I0129 08:29:33.625694 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lw8sp"
Jan 29 08:29:33 crc kubenswrapper[4861]: I0129 08:29:33.626294 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lw8sp"
Jan 29 08:29:34 crc kubenswrapper[4861]: I0129 08:29:34.676881 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lw8sp" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" containerName="registry-server" probeResult="failure" output=<
Jan 29 08:29:34 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s
Jan 29 08:29:34 crc kubenswrapper[4861]: >
Jan 29 08:29:37 crc kubenswrapper[4861]: I0129 08:29:37.073666 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-4vm9n" podUID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerName="registry-server" probeResult="failure" output=<
Jan 29 08:29:37 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s
Jan 29 08:29:37 crc kubenswrapper[4861]: >
Jan 29 08:29:44 crc kubenswrapper[4861]: I0129 08:29:44.677096 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lw8sp" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" containerName="registry-server" probeResult="failure" output=<
Jan 29 08:29:44 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s
Jan 29 08:29:44 crc kubenswrapper[4861]: >
Jan 29 08:29:46 crc kubenswrapper[4861]: I0129 08:29:46.076780 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:46 crc kubenswrapper[4861]: I0129 08:29:46.144915 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:46 crc kubenswrapper[4861]: I0129 08:29:46.873564 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4vm9n"]
Jan 29 08:29:47 crc kubenswrapper[4861]: I0129 08:29:47.139242 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4vm9n" podUID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerName="registry-server" containerID="cri-o://b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5" gracePeriod=2
Jan 29 08:29:47 crc kubenswrapper[4861]: I0129 08:29:47.653151 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:47 crc kubenswrapper[4861]: I0129 08:29:47.775459 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-utilities\") pod \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") "
Jan 29 08:29:47 crc kubenswrapper[4861]: I0129 08:29:47.775527 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5jvt\" (UniqueName: \"kubernetes.io/projected/1a8e9a9e-955b-401d-80eb-e8556e3a5269-kube-api-access-g5jvt\") pod \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") "
Jan 29 08:29:47 crc kubenswrapper[4861]: I0129 08:29:47.775711 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-catalog-content\") pod \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\" (UID: \"1a8e9a9e-955b-401d-80eb-e8556e3a5269\") "
Jan 29 08:29:47 crc kubenswrapper[4861]: I0129 08:29:47.776315 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-utilities" (OuterVolumeSpecName: "utilities") pod "1a8e9a9e-955b-401d-80eb-e8556e3a5269" (UID: "1a8e9a9e-955b-401d-80eb-e8556e3a5269"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:29:47 crc kubenswrapper[4861]: I0129 08:29:47.777784 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 08:29:47 crc kubenswrapper[4861]: I0129 08:29:47.781725 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a8e9a9e-955b-401d-80eb-e8556e3a5269-kube-api-access-g5jvt" (OuterVolumeSpecName: "kube-api-access-g5jvt") pod "1a8e9a9e-955b-401d-80eb-e8556e3a5269" (UID: "1a8e9a9e-955b-401d-80eb-e8556e3a5269"). InnerVolumeSpecName "kube-api-access-g5jvt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:29:47 crc kubenswrapper[4861]: I0129 08:29:47.837335 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1a8e9a9e-955b-401d-80eb-e8556e3a5269" (UID: "1a8e9a9e-955b-401d-80eb-e8556e3a5269"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:29:47 crc kubenswrapper[4861]: I0129 08:29:47.880287 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5jvt\" (UniqueName: \"kubernetes.io/projected/1a8e9a9e-955b-401d-80eb-e8556e3a5269-kube-api-access-g5jvt\") on node \"crc\" DevicePath \"\""
Jan 29 08:29:47 crc kubenswrapper[4861]: I0129 08:29:47.880324 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a8e9a9e-955b-401d-80eb-e8556e3a5269-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.156154 4861 generic.go:334] "Generic (PLEG): container finished" podID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerID="b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5" exitCode=0
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.156192 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vm9n" event={"ID":"1a8e9a9e-955b-401d-80eb-e8556e3a5269","Type":"ContainerDied","Data":"b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5"}
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.156222 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vm9n" event={"ID":"1a8e9a9e-955b-401d-80eb-e8556e3a5269","Type":"ContainerDied","Data":"f1b46ace197e914bffa03911258897c2bc6236da9f7dc7fe25f9955fba479781"}
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.156243 4861 scope.go:117] "RemoveContainer" containerID="b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5"
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.156244 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4vm9n"
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.183373 4861 scope.go:117] "RemoveContainer" containerID="84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45"
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.191356 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4vm9n"]
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.199381 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4vm9n"]
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.214995 4861 scope.go:117] "RemoveContainer" containerID="2a44c13fe589e337fb0c8d3008c277723f623753a130adc2372134b2add319ce"
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.255092 4861 scope.go:117] "RemoveContainer" containerID="b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5"
Jan 29 08:29:48 crc kubenswrapper[4861]: E0129 08:29:48.255761 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5\": container with ID starting with b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5 not found: ID does not exist" containerID="b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5"
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.255799 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5"} err="failed to get container status \"b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5\": rpc error: code = NotFound desc = could not find container \"b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5\": container with ID starting with b78b7f0d797af90820c1b95c32605dcbbd2bbe2ba3aa4b8c20e8e01e19e1daf5 not found: ID does not exist"
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.255823 4861 scope.go:117] "RemoveContainer" containerID="84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45"
Jan 29 08:29:48 crc kubenswrapper[4861]: E0129 08:29:48.256211 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45\": container with ID starting with 84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45 not found: ID does not exist" containerID="84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45"
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.256240 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45"} err="failed to get container status \"84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45\": rpc error: code = NotFound desc = could not find container \"84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45\": container with ID starting with 84e82a4894c6800a2a363cf04a55a8e3df1dcc98bac5bc0c99b8163fc3e71f45 not found: ID does not exist"
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.256254 4861 scope.go:117] "RemoveContainer" containerID="2a44c13fe589e337fb0c8d3008c277723f623753a130adc2372134b2add319ce"
Jan 29 08:29:48 crc kubenswrapper[4861]: E0129 08:29:48.256638 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a44c13fe589e337fb0c8d3008c277723f623753a130adc2372134b2add319ce\": container with ID starting with 2a44c13fe589e337fb0c8d3008c277723f623753a130adc2372134b2add319ce not found: ID does not exist" containerID="2a44c13fe589e337fb0c8d3008c277723f623753a130adc2372134b2add319ce"
Jan 29 08:29:48 crc kubenswrapper[4861]: I0129 08:29:48.256666 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a44c13fe589e337fb0c8d3008c277723f623753a130adc2372134b2add319ce"} err="failed to get container status \"2a44c13fe589e337fb0c8d3008c277723f623753a130adc2372134b2add319ce\": rpc error: code = NotFound desc = could not find container \"2a44c13fe589e337fb0c8d3008c277723f623753a130adc2372134b2add319ce\": container with ID starting with 2a44c13fe589e337fb0c8d3008c277723f623753a130adc2372134b2add319ce not found: ID does not exist"
Jan 29 08:29:49 crc kubenswrapper[4861]: I0129 08:29:49.133929 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" path="/var/lib/kubelet/pods/1a8e9a9e-955b-401d-80eb-e8556e3a5269/volumes"
Jan 29 08:29:54 crc kubenswrapper[4861]: I0129 08:29:54.679542 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lw8sp" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" containerName="registry-server" probeResult="failure" output=<
Jan 29 08:29:54 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s
Jan 29 08:29:54 crc kubenswrapper[4861]: >
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.184417 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"]
Jan 29 08:30:00 crc kubenswrapper[4861]: E0129 08:30:00.185608 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerName="extract-utilities"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.185623 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerName="extract-utilities"
Jan 29 08:30:00 crc kubenswrapper[4861]: E0129 08:30:00.185640 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerName="extract-content"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.185647 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerName="extract-content"
Jan 29 08:30:00 crc kubenswrapper[4861]: E0129 08:30:00.185667 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerName="registry-server"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.185675 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerName="registry-server"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.185939 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a8e9a9e-955b-401d-80eb-e8556e3a5269" containerName="registry-server"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.186852 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.190196 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.190255 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.200635 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"]
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.247356 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-config-volume\") pod \"collect-profiles-29494590-z8d7g\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.247551 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk48g\" (UniqueName: \"kubernetes.io/projected/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-kube-api-access-wk48g\") pod \"collect-profiles-29494590-z8d7g\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.247689 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-secret-volume\") pod \"collect-profiles-29494590-z8d7g\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.349676 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-config-volume\") pod \"collect-profiles-29494590-z8d7g\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.349779 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk48g\" (UniqueName: \"kubernetes.io/projected/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-kube-api-access-wk48g\") pod \"collect-profiles-29494590-z8d7g\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.350012 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-secret-volume\") pod \"collect-profiles-29494590-z8d7g\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.350556 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-config-volume\") pod \"collect-profiles-29494590-z8d7g\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.364703 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-secret-volume\") pod \"collect-profiles-29494590-z8d7g\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.368056 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk48g\" (UniqueName: \"kubernetes.io/projected/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-kube-api-access-wk48g\") pod \"collect-profiles-29494590-z8d7g\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.520798 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.629566 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.629910 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.629969 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.630799 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"30135386b1ff7ecf0c082bc48e918189cfa4474d420f76eda1cd94f3d751fc19"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 08:30:00 crc kubenswrapper[4861]: I0129 08:30:00.630854 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://30135386b1ff7ecf0c082bc48e918189cfa4474d420f76eda1cd94f3d751fc19" gracePeriod=600
Jan 29 08:30:01 crc kubenswrapper[4861]: I0129 08:30:01.016064 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"]
Jan 29 08:30:01 crc kubenswrapper[4861]: I0129 08:30:01.271987 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g" event={"ID":"d76c0b51-8664-4916-9cb4-03cee1a4b2b7","Type":"ContainerStarted","Data":"4ecb1fdafa15c83a3776674b3804e1e0c4324ef847dd7ee8495690710b36a431"}
Jan 29 08:30:01 crc
kubenswrapper[4861]: I0129 08:30:01.272035 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g" event={"ID":"d76c0b51-8664-4916-9cb4-03cee1a4b2b7","Type":"ContainerStarted","Data":"360fa6c1e8e029afe4a769c23c5c076c91af32f3e357d4f0b76dd3c0d0300a6e"} Jan 29 08:30:01 crc kubenswrapper[4861]: I0129 08:30:01.279389 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="30135386b1ff7ecf0c082bc48e918189cfa4474d420f76eda1cd94f3d751fc19" exitCode=0 Jan 29 08:30:01 crc kubenswrapper[4861]: I0129 08:30:01.279616 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"30135386b1ff7ecf0c082bc48e918189cfa4474d420f76eda1cd94f3d751fc19"} Jan 29 08:30:01 crc kubenswrapper[4861]: I0129 08:30:01.281527 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"} Jan 29 08:30:01 crc kubenswrapper[4861]: I0129 08:30:01.281608 4861 scope.go:117] "RemoveContainer" containerID="18a1fa2981c85f8363c07fca20004e2abdac623b40bbe0e3c062b56da9b6e34b" Jan 29 08:30:01 crc kubenswrapper[4861]: I0129 08:30:01.294843 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g" podStartSLOduration=1.2948294009999999 podStartE2EDuration="1.294829401s" podCreationTimestamp="2026-01-29 08:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:30:01.292318165 +0000 UTC m=+6892.963812742" watchObservedRunningTime="2026-01-29 08:30:01.294829401 +0000 UTC m=+6892.966323958" Jan 29 08:30:02 crc kubenswrapper[4861]: I0129 08:30:02.290750 4861 generic.go:334] "Generic (PLEG): container finished" podID="d76c0b51-8664-4916-9cb4-03cee1a4b2b7" containerID="4ecb1fdafa15c83a3776674b3804e1e0c4324ef847dd7ee8495690710b36a431" exitCode=0 Jan 29 08:30:02 crc kubenswrapper[4861]: I0129 08:30:02.290841 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g" event={"ID":"d76c0b51-8664-4916-9cb4-03cee1a4b2b7","Type":"ContainerDied","Data":"4ecb1fdafa15c83a3776674b3804e1e0c4324ef847dd7ee8495690710b36a431"} Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.683436 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lw8sp" Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.734774 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lw8sp" Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.740939 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g" Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.828344 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wk48g\" (UniqueName: \"kubernetes.io/projected/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-kube-api-access-wk48g\") pod \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.828547 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-secret-volume\") pod \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.828654 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-config-volume\") pod \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\" (UID: \"d76c0b51-8664-4916-9cb4-03cee1a4b2b7\") " Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.829687 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-config-volume" (OuterVolumeSpecName: "config-volume") pod "d76c0b51-8664-4916-9cb4-03cee1a4b2b7" (UID: "d76c0b51-8664-4916-9cb4-03cee1a4b2b7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.833591 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d76c0b51-8664-4916-9cb4-03cee1a4b2b7" (UID: "d76c0b51-8664-4916-9cb4-03cee1a4b2b7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.833951 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-kube-api-access-wk48g" (OuterVolumeSpecName: "kube-api-access-wk48g") pod "d76c0b51-8664-4916-9cb4-03cee1a4b2b7" (UID: "d76c0b51-8664-4916-9cb4-03cee1a4b2b7"). InnerVolumeSpecName "kube-api-access-wk48g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.924553 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lw8sp"] Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.931018 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.931043 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 08:30:03 crc kubenswrapper[4861]: I0129 08:30:03.931053 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wk48g\" (UniqueName: \"kubernetes.io/projected/d76c0b51-8664-4916-9cb4-03cee1a4b2b7-kube-api-access-wk48g\") on node \"crc\" DevicePath \"\"" Jan 29 08:30:04 crc kubenswrapper[4861]: I0129 08:30:04.327763 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g" Jan 29 08:30:04 crc kubenswrapper[4861]: I0129 08:30:04.328159 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g" event={"ID":"d76c0b51-8664-4916-9cb4-03cee1a4b2b7","Type":"ContainerDied","Data":"360fa6c1e8e029afe4a769c23c5c076c91af32f3e357d4f0b76dd3c0d0300a6e"} Jan 29 08:30:04 crc kubenswrapper[4861]: I0129 08:30:04.328197 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="360fa6c1e8e029afe4a769c23c5c076c91af32f3e357d4f0b76dd3c0d0300a6e" Jan 29 08:30:04 crc kubenswrapper[4861]: I0129 08:30:04.381800 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"] Jan 29 08:30:04 crc kubenswrapper[4861]: I0129 08:30:04.390316 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494545-m4n6l"] Jan 29 08:30:05 crc kubenswrapper[4861]: I0129 08:30:05.133597 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01b12670-e056-49b0-a9a3-8680110c1c1c" path="/var/lib/kubelet/pods/01b12670-e056-49b0-a9a3-8680110c1c1c/volumes" Jan 29 08:30:05 crc kubenswrapper[4861]: I0129 08:30:05.334702 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lw8sp" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" containerName="registry-server" containerID="cri-o://d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7" gracePeriod=2 Jan 29 08:30:05 crc kubenswrapper[4861]: I0129 08:30:05.857591 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lw8sp" Jan 29 08:30:05 crc kubenswrapper[4861]: I0129 08:30:05.873396 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-catalog-content\") pod \"89f344e5-be53-4158-8416-e0a62b697bc0\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " Jan 29 08:30:05 crc kubenswrapper[4861]: I0129 08:30:05.873491 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-utilities\") pod \"89f344e5-be53-4158-8416-e0a62b697bc0\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " Jan 29 08:30:05 crc kubenswrapper[4861]: I0129 08:30:05.873635 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phg2r\" (UniqueName: \"kubernetes.io/projected/89f344e5-be53-4158-8416-e0a62b697bc0-kube-api-access-phg2r\") pod \"89f344e5-be53-4158-8416-e0a62b697bc0\" (UID: \"89f344e5-be53-4158-8416-e0a62b697bc0\") " Jan 29 08:30:05 crc kubenswrapper[4861]: I0129 08:30:05.874465 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-utilities" (OuterVolumeSpecName: "utilities") pod "89f344e5-be53-4158-8416-e0a62b697bc0" (UID: "89f344e5-be53-4158-8416-e0a62b697bc0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:30:05 crc kubenswrapper[4861]: I0129 08:30:05.878839 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89f344e5-be53-4158-8416-e0a62b697bc0-kube-api-access-phg2r" (OuterVolumeSpecName: "kube-api-access-phg2r") pod "89f344e5-be53-4158-8416-e0a62b697bc0" (UID: "89f344e5-be53-4158-8416-e0a62b697bc0"). InnerVolumeSpecName "kube-api-access-phg2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:30:05 crc kubenswrapper[4861]: I0129 08:30:05.976731 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:30:05 crc kubenswrapper[4861]: I0129 08:30:05.976762 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phg2r\" (UniqueName: \"kubernetes.io/projected/89f344e5-be53-4158-8416-e0a62b697bc0-kube-api-access-phg2r\") on node \"crc\" DevicePath \"\"" Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.013514 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "89f344e5-be53-4158-8416-e0a62b697bc0" (UID: "89f344e5-be53-4158-8416-e0a62b697bc0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.080920 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89f344e5-be53-4158-8416-e0a62b697bc0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.348629 4861 generic.go:334] "Generic (PLEG): container finished" podID="89f344e5-be53-4158-8416-e0a62b697bc0" containerID="d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7" exitCode=0 Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.348682 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lw8sp" Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.348682 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw8sp" event={"ID":"89f344e5-be53-4158-8416-e0a62b697bc0","Type":"ContainerDied","Data":"d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7"} Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.348855 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lw8sp" event={"ID":"89f344e5-be53-4158-8416-e0a62b697bc0","Type":"ContainerDied","Data":"afea3fc2b60de8ccdf599a9530b786a51c2380ff6e60740609432cf8c1cb3f1b"} Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.348885 4861 scope.go:117] "RemoveContainer" containerID="d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7" Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.386826 4861 scope.go:117] "RemoveContainer" containerID="9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245" Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.388489 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lw8sp"] Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.396904 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lw8sp"] Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.411478 4861 scope.go:117] "RemoveContainer" containerID="ce1a41fe8f2177c909e6886f615633e2f7a698c27a1494792b00dfc8cb5f085e" Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.471590 4861 scope.go:117] "RemoveContainer" containerID="d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7" Jan 29 08:30:06 crc kubenswrapper[4861]: E0129 08:30:06.472042 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7\": container with ID starting with d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7 not found: ID does not exist" containerID="d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7" Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.472133 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7"} err="failed to get container status \"d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7\": rpc error: code = NotFound desc = could not find container \"d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7\": container with ID starting with d963f295fbee599ee2f3da9f39d79677088345eb356b4421be1e80ba366b2dd7 not found: ID does not exist" Jan 29 08:30:06 crc 
kubenswrapper[4861]: I0129 08:30:06.472176 4861 scope.go:117] "RemoveContainer" containerID="9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245" Jan 29 08:30:06 crc kubenswrapper[4861]: E0129 08:30:06.472746 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245\": container with ID starting with 9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245 not found: ID does not exist" containerID="9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245" Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.472788 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245"} err="failed to get container status \"9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245\": rpc error: code = NotFound desc = could not find container \"9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245\": container with ID starting with 9abb9e6ba93ad4d9b0806e413922be1e32c6082e1698546ef6ff80cfb08a5245 not found: ID does not exist" Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.472815 4861 scope.go:117] "RemoveContainer" containerID="ce1a41fe8f2177c909e6886f615633e2f7a698c27a1494792b00dfc8cb5f085e" Jan 29 08:30:06 crc kubenswrapper[4861]: E0129 08:30:06.473264 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce1a41fe8f2177c909e6886f615633e2f7a698c27a1494792b00dfc8cb5f085e\": container with ID starting with ce1a41fe8f2177c909e6886f615633e2f7a698c27a1494792b00dfc8cb5f085e not found: ID does not exist" containerID="ce1a41fe8f2177c909e6886f615633e2f7a698c27a1494792b00dfc8cb5f085e" Jan 29 08:30:06 crc kubenswrapper[4861]: I0129 08:30:06.473292 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce1a41fe8f2177c909e6886f615633e2f7a698c27a1494792b00dfc8cb5f085e"} err="failed to get container status \"ce1a41fe8f2177c909e6886f615633e2f7a698c27a1494792b00dfc8cb5f085e\": rpc error: code = NotFound desc = could not find container \"ce1a41fe8f2177c909e6886f615633e2f7a698c27a1494792b00dfc8cb5f085e\": container with ID starting with ce1a41fe8f2177c909e6886f615633e2f7a698c27a1494792b00dfc8cb5f085e not found: ID does not exist" Jan 29 08:30:07 crc kubenswrapper[4861]: I0129 08:30:07.129270 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" path="/var/lib/kubelet/pods/89f344e5-be53-4158-8416-e0a62b697bc0/volumes" Jan 29 08:30:31 crc kubenswrapper[4861]: I0129 08:30:31.043187 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-bfv2w"] Jan 29 08:30:31 crc kubenswrapper[4861]: I0129 08:30:31.051797 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-bfv2w"] Jan 29 08:30:31 crc kubenswrapper[4861]: I0129 08:30:31.059514 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-8ed8-account-create-update-qc2hx"] Jan 29 08:30:31 crc kubenswrapper[4861]: I0129 08:30:31.067171 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-8ed8-account-create-update-qc2hx"] Jan 29 08:30:31 crc kubenswrapper[4861]: I0129 08:30:31.129219 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b" 
path="/var/lib/kubelet/pods/28f80e2d-ad83-4ee1-b4f6-6de5197c4b0b/volumes" Jan 29 08:30:31 crc kubenswrapper[4861]: I0129 08:30:31.130002 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b" path="/var/lib/kubelet/pods/5fc09f11-03aa-4c40-aae8-d71fa4fb4a6b/volumes" Jan 29 08:30:32 crc kubenswrapper[4861]: I0129 08:30:32.195411 4861 scope.go:117] "RemoveContainer" containerID="d5efa4905b44535bdb9819b6076d819575eb23a73b969bb50c940bd3147ff0a2" Jan 29 08:30:32 crc kubenswrapper[4861]: I0129 08:30:32.229208 4861 scope.go:117] "RemoveContainer" containerID="ed85ed353b8a57df9e96bc2bd576de5481b8db5ba9c82ff0189c462b69b225cf" Jan 29 08:30:32 crc kubenswrapper[4861]: I0129 08:30:32.319379 4861 scope.go:117] "RemoveContainer" containerID="c2fa11e106c7ee126d3c44aecbe3c2d32eaeb83ee7237ec177e1dc7ce5b164a5" Jan 29 08:30:37 crc kubenswrapper[4861]: I0129 08:30:37.918324 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-7dc5d457f5-tv7x9" podUID="758c49ec-0604-450b-8d71-6a01e3993cb6" containerName="neutron-api" probeResult="failure" output="HTTP probe failed with statuscode: 502" Jan 29 08:30:44 crc kubenswrapper[4861]: I0129 08:30:44.049909 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-4mhs8"] Jan 29 08:30:44 crc kubenswrapper[4861]: I0129 08:30:44.059916 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-4mhs8"] Jan 29 08:30:45 crc kubenswrapper[4861]: I0129 08:30:45.129732 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40eac315-cbc0-4b34-b276-bb81c3df7afe" path="/var/lib/kubelet/pods/40eac315-cbc0-4b34-b276-bb81c3df7afe/volumes" Jan 29 08:31:32 crc kubenswrapper[4861]: I0129 08:31:32.487762 4861 scope.go:117] "RemoveContainer" containerID="17f8da8584b2c53558311b3de4186a2ffd5b8417b345d14e1571c8fad788ee07" Jan 29 08:32:00 crc kubenswrapper[4861]: I0129 08:32:00.629971 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:32:00 crc kubenswrapper[4861]: I0129 08:32:00.630554 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:32:30 crc kubenswrapper[4861]: I0129 08:32:30.630040 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:32:30 crc kubenswrapper[4861]: I0129 08:32:30.630729 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:33:00 crc kubenswrapper[4861]: I0129 08:33:00.629620 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:33:00 crc kubenswrapper[4861]: I0129 08:33:00.630180 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:33:00 crc kubenswrapper[4861]: I0129 08:33:00.630230 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 08:33:00 crc kubenswrapper[4861]: I0129 08:33:00.630951 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 08:33:00 crc kubenswrapper[4861]: I0129 08:33:00.631000 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" gracePeriod=600 Jan 29 08:33:00 crc kubenswrapper[4861]: E0129 08:33:00.762208 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:33:01 crc kubenswrapper[4861]: I0129 08:33:01.016252 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" exitCode=0 Jan 29 08:33:01 crc kubenswrapper[4861]: I0129 08:33:01.016301 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"} Jan 29 08:33:01 crc kubenswrapper[4861]: I0129 08:33:01.016340 4861 scope.go:117] "RemoveContainer" containerID="30135386b1ff7ecf0c082bc48e918189cfa4474d420f76eda1cd94f3d751fc19" Jan 29 08:33:01 crc kubenswrapper[4861]: I0129 08:33:01.017027 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:33:01 crc kubenswrapper[4861]: E0129 08:33:01.017317 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" 
podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:33:12 crc kubenswrapper[4861]: I0129 08:33:12.116054 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:33:12 crc kubenswrapper[4861]: E0129 08:33:12.116650 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:33:24 crc kubenswrapper[4861]: I0129 08:33:24.117327 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:33:24 crc kubenswrapper[4861]: E0129 08:33:24.118585 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:33:32 crc kubenswrapper[4861]: I0129 08:33:32.330256 4861 generic.go:334] "Generic (PLEG): container finished" podID="7d1929c2-3989-426c-9af1-4c54abe0ab7e" containerID="b49c9c76fec9980e16f1e5b924bd2130cfaa03d7d7cc9f6b302336032389cf48" exitCode=0 Jan 29 08:33:32 crc kubenswrapper[4861]: I0129 08:33:32.330380 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" event={"ID":"7d1929c2-3989-426c-9af1-4c54abe0ab7e","Type":"ContainerDied","Data":"b49c9c76fec9980e16f1e5b924bd2130cfaa03d7d7cc9f6b302336032389cf48"} Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.759792 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.862201 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-ssh-key-openstack-cell1\") pod \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.862274 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82vr7\" (UniqueName: \"kubernetes.io/projected/7d1929c2-3989-426c-9af1-4c54abe0ab7e-kube-api-access-82vr7\") pod \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.862357 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-tripleo-cleanup-combined-ca-bundle\") pod \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.862386 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-inventory\") pod \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\" (UID: \"7d1929c2-3989-426c-9af1-4c54abe0ab7e\") " Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.867798 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-tripleo-cleanup-combined-ca-bundle" (OuterVolumeSpecName: "tripleo-cleanup-combined-ca-bundle") pod "7d1929c2-3989-426c-9af1-4c54abe0ab7e" (UID: "7d1929c2-3989-426c-9af1-4c54abe0ab7e"). InnerVolumeSpecName "tripleo-cleanup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.868121 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d1929c2-3989-426c-9af1-4c54abe0ab7e-kube-api-access-82vr7" (OuterVolumeSpecName: "kube-api-access-82vr7") pod "7d1929c2-3989-426c-9af1-4c54abe0ab7e" (UID: "7d1929c2-3989-426c-9af1-4c54abe0ab7e"). InnerVolumeSpecName "kube-api-access-82vr7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.896896 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "7d1929c2-3989-426c-9af1-4c54abe0ab7e" (UID: "7d1929c2-3989-426c-9af1-4c54abe0ab7e"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.897335 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-inventory" (OuterVolumeSpecName: "inventory") pod "7d1929c2-3989-426c-9af1-4c54abe0ab7e" (UID: "7d1929c2-3989-426c-9af1-4c54abe0ab7e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.965575 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.965656 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82vr7\" (UniqueName: \"kubernetes.io/projected/7d1929c2-3989-426c-9af1-4c54abe0ab7e-kube-api-access-82vr7\") on node \"crc\" DevicePath \"\"" Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.965671 4861 reconciler_common.go:293] "Volume detached for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-tripleo-cleanup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:33:33 crc kubenswrapper[4861]: I0129 08:33:33.965686 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d1929c2-3989-426c-9af1-4c54abe0ab7e-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:33:34 crc kubenswrapper[4861]: I0129 08:33:34.352181 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" event={"ID":"7d1929c2-3989-426c-9af1-4c54abe0ab7e","Type":"ContainerDied","Data":"61db711ac80f019b07cee3ffec16fecde9e467647c46b3cf377cac79038d517e"} Jan 29 08:33:34 crc kubenswrapper[4861]: I0129 08:33:34.352467 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61db711ac80f019b07cee3ffec16fecde9e467647c46b3cf377cac79038d517e" Jan 29 08:33:34 crc kubenswrapper[4861]: I0129 08:33:34.352233 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv" Jan 29 08:33:38 crc kubenswrapper[4861]: I0129 08:33:38.117746 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:33:38 crc kubenswrapper[4861]: E0129 08:33:38.119006 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.439818 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-tz2gk"] Jan 29 08:33:42 crc kubenswrapper[4861]: E0129 08:33:42.442454 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" containerName="extract-utilities" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.442561 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" containerName="extract-utilities" Jan 29 08:33:42 crc kubenswrapper[4861]: E0129 08:33:42.442654 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" containerName="registry-server" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.450272 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" containerName="registry-server" Jan 29 08:33:42 crc kubenswrapper[4861]: E0129 08:33:42.450337 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d1929c2-3989-426c-9af1-4c54abe0ab7e" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.450349 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d1929c2-3989-426c-9af1-4c54abe0ab7e" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Jan 29 08:33:42 crc kubenswrapper[4861]: E0129 08:33:42.450400 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" containerName="extract-content" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.450409 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" containerName="extract-content" Jan 29 08:33:42 crc kubenswrapper[4861]: E0129 08:33:42.450424 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d76c0b51-8664-4916-9cb4-03cee1a4b2b7" containerName="collect-profiles" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.450432 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d76c0b51-8664-4916-9cb4-03cee1a4b2b7" containerName="collect-profiles" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.450825 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d76c0b51-8664-4916-9cb4-03cee1a4b2b7" containerName="collect-profiles" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.450839 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="89f344e5-be53-4158-8416-e0a62b697bc0" containerName="registry-server" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.450865 4861 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="7d1929c2-3989-426c-9af1-4c54abe0ab7e" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.451825 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.454637 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.454672 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.454855 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.454990 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.457571 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-tz2gk"] Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.535696 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlns2\" (UniqueName: \"kubernetes.io/projected/17876c4e-e24a-477b-86bf-aa99c0ae2803-kube-api-access-hlns2\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.536126 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-ssh-key-openstack-cell1\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.536250 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.536351 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-inventory\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.637923 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.638013 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-inventory\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.638135 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlns2\" (UniqueName: \"kubernetes.io/projected/17876c4e-e24a-477b-86bf-aa99c0ae2803-kube-api-access-hlns2\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.638458 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-ssh-key-openstack-cell1\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.644151 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-inventory\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.644735 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-ssh-key-openstack-cell1\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.645105 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.655356 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlns2\" (UniqueName: \"kubernetes.io/projected/17876c4e-e24a-477b-86bf-aa99c0ae2803-kube-api-access-hlns2\") pod \"bootstrap-openstack-openstack-cell1-tz2gk\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:42 crc kubenswrapper[4861]: I0129 08:33:42.793670 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:33:43 crc kubenswrapper[4861]: I0129 08:33:43.332653 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-tz2gk"] Jan 29 08:33:43 crc kubenswrapper[4861]: I0129 08:33:43.422882 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" event={"ID":"17876c4e-e24a-477b-86bf-aa99c0ae2803","Type":"ContainerStarted","Data":"519e01e498739876f4b1b14e3427fde4223fc1fddaaf1c3a6db45187cd436961"} Jan 29 08:33:44 crc kubenswrapper[4861]: I0129 08:33:44.433805 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" event={"ID":"17876c4e-e24a-477b-86bf-aa99c0ae2803","Type":"ContainerStarted","Data":"078eb0fcd7c1c541f53cbadecd1efbe507c85e3454b158b8ae01c764598deac3"} Jan 29 08:33:44 crc kubenswrapper[4861]: I0129 08:33:44.450947 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" podStartSLOduration=1.89051489 podStartE2EDuration="2.450927358s" podCreationTimestamp="2026-01-29 08:33:42 +0000 UTC" firstStartedPulling="2026-01-29 08:33:43.344652645 +0000 UTC m=+7115.016147202" lastFinishedPulling="2026-01-29 08:33:43.905065103 +0000 UTC m=+7115.576559670" observedRunningTime="2026-01-29 08:33:44.450096726 +0000 UTC m=+7116.121591283" watchObservedRunningTime="2026-01-29 08:33:44.450927358 +0000 UTC m=+7116.122421905" Jan 29 08:33:50 crc kubenswrapper[4861]: I0129 08:33:50.117445 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:33:50 crc kubenswrapper[4861]: E0129 08:33:50.119550 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:34:02 crc kubenswrapper[4861]: I0129 08:34:02.116433 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:34:02 crc kubenswrapper[4861]: E0129 08:34:02.117230 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:34:13 crc kubenswrapper[4861]: I0129 08:34:13.117631 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:34:13 crc kubenswrapper[4861]: E0129 08:34:13.118808 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" 
podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.064979 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-r7d8j"] Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.069895 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.079395 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r7d8j"] Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.159747 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-catalog-content\") pod \"redhat-marketplace-r7d8j\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") " pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.159896 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-utilities\") pod \"redhat-marketplace-r7d8j\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") " pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.159924 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t57wq\" (UniqueName: \"kubernetes.io/projected/2b6cfc10-26c7-43a7-961d-a7444508d005-kube-api-access-t57wq\") pod \"redhat-marketplace-r7d8j\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") " pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.263043 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-catalog-content\") pod \"redhat-marketplace-r7d8j\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") " pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.263223 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-utilities\") pod \"redhat-marketplace-r7d8j\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") " pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.263276 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t57wq\" (UniqueName: \"kubernetes.io/projected/2b6cfc10-26c7-43a7-961d-a7444508d005-kube-api-access-t57wq\") pod \"redhat-marketplace-r7d8j\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") " pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.263746 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-catalog-content\") pod \"redhat-marketplace-r7d8j\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") " pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.263866 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-utilities\") pod \"redhat-marketplace-r7d8j\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") " pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.292047 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t57wq\" (UniqueName: \"kubernetes.io/projected/2b6cfc10-26c7-43a7-961d-a7444508d005-kube-api-access-t57wq\") pod \"redhat-marketplace-r7d8j\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") " pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.404094 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:18 crc kubenswrapper[4861]: I0129 08:34:18.936692 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r7d8j"] Jan 29 08:34:19 crc kubenswrapper[4861]: I0129 08:34:19.770687 4861 generic.go:334] "Generic (PLEG): container finished" podID="2b6cfc10-26c7-43a7-961d-a7444508d005" containerID="3ba282ca35f76844e8efa3dc2a0340aea82ea0c5b63ba940a019bf94519154fd" exitCode=0 Jan 29 08:34:19 crc kubenswrapper[4861]: I0129 08:34:19.770949 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r7d8j" event={"ID":"2b6cfc10-26c7-43a7-961d-a7444508d005","Type":"ContainerDied","Data":"3ba282ca35f76844e8efa3dc2a0340aea82ea0c5b63ba940a019bf94519154fd"} Jan 29 08:34:19 crc kubenswrapper[4861]: I0129 08:34:19.770979 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r7d8j" event={"ID":"2b6cfc10-26c7-43a7-961d-a7444508d005","Type":"ContainerStarted","Data":"b175d57cbfbaab8baeacb0dbe29a0d5b536c211c060ef5b23bd3b8063bdda18c"} Jan 29 08:34:19 crc kubenswrapper[4861]: I0129 08:34:19.773314 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 08:34:20 crc kubenswrapper[4861]: I0129 08:34:20.784470 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r7d8j" event={"ID":"2b6cfc10-26c7-43a7-961d-a7444508d005","Type":"ContainerStarted","Data":"417d316a177d1ebed5bdf5706933af7fafa26c59e987c2f2aa4053b89018b8cc"} Jan 29 08:34:21 crc kubenswrapper[4861]: I0129 08:34:21.796694 4861 generic.go:334] "Generic (PLEG): container finished" podID="2b6cfc10-26c7-43a7-961d-a7444508d005" containerID="417d316a177d1ebed5bdf5706933af7fafa26c59e987c2f2aa4053b89018b8cc" exitCode=0 Jan 29 08:34:21 crc kubenswrapper[4861]: I0129 08:34:21.796792 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r7d8j" event={"ID":"2b6cfc10-26c7-43a7-961d-a7444508d005","Type":"ContainerDied","Data":"417d316a177d1ebed5bdf5706933af7fafa26c59e987c2f2aa4053b89018b8cc"} Jan 29 08:34:22 crc kubenswrapper[4861]: I0129 08:34:22.807813 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r7d8j" event={"ID":"2b6cfc10-26c7-43a7-961d-a7444508d005","Type":"ContainerStarted","Data":"edb373ffed7168c3c8a2c2d5358a0d7251621860c27be1195cc884f97d2d2cfe"} Jan 29 08:34:22 crc kubenswrapper[4861]: I0129 08:34:22.838924 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-r7d8j" podStartSLOduration=2.425351534 podStartE2EDuration="4.838900244s" 
podCreationTimestamp="2026-01-29 08:34:18 +0000 UTC" firstStartedPulling="2026-01-29 08:34:19.773105717 +0000 UTC m=+7151.444600274" lastFinishedPulling="2026-01-29 08:34:22.186654427 +0000 UTC m=+7153.858148984" observedRunningTime="2026-01-29 08:34:22.831979931 +0000 UTC m=+7154.503474508" watchObservedRunningTime="2026-01-29 08:34:22.838900244 +0000 UTC m=+7154.510394811" Jan 29 08:34:27 crc kubenswrapper[4861]: I0129 08:34:27.116788 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:34:27 crc kubenswrapper[4861]: E0129 08:34:27.117658 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:34:28 crc kubenswrapper[4861]: I0129 08:34:28.405418 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:28 crc kubenswrapper[4861]: I0129 08:34:28.405469 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:28 crc kubenswrapper[4861]: I0129 08:34:28.452644 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:28 crc kubenswrapper[4861]: I0129 08:34:28.914947 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-r7d8j" Jan 29 08:34:29 crc kubenswrapper[4861]: I0129 08:34:29.821967 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r7d8j"] Jan 29 08:34:30 crc kubenswrapper[4861]: I0129 08:34:30.890761 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-r7d8j" podUID="2b6cfc10-26c7-43a7-961d-a7444508d005" containerName="registry-server" containerID="cri-o://edb373ffed7168c3c8a2c2d5358a0d7251621860c27be1195cc884f97d2d2cfe" gracePeriod=2 Jan 29 08:34:31 crc kubenswrapper[4861]: I0129 08:34:31.921233 4861 generic.go:334] "Generic (PLEG): container finished" podID="2b6cfc10-26c7-43a7-961d-a7444508d005" containerID="edb373ffed7168c3c8a2c2d5358a0d7251621860c27be1195cc884f97d2d2cfe" exitCode=0 Jan 29 08:34:31 crc kubenswrapper[4861]: I0129 08:34:31.922046 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r7d8j" event={"ID":"2b6cfc10-26c7-43a7-961d-a7444508d005","Type":"ContainerDied","Data":"edb373ffed7168c3c8a2c2d5358a0d7251621860c27be1195cc884f97d2d2cfe"} Jan 29 08:34:31 crc kubenswrapper[4861]: I0129 08:34:31.922097 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r7d8j" event={"ID":"2b6cfc10-26c7-43a7-961d-a7444508d005","Type":"ContainerDied","Data":"b175d57cbfbaab8baeacb0dbe29a0d5b536c211c060ef5b23bd3b8063bdda18c"} Jan 29 08:34:31 crc kubenswrapper[4861]: I0129 08:34:31.922116 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b175d57cbfbaab8baeacb0dbe29a0d5b536c211c060ef5b23bd3b8063bdda18c" Jan 29 08:34:31 crc kubenswrapper[4861]: I0129 08:34:31.969816 4861 util.go:48] "No ready 
Jan 29 08:34:31 crc kubenswrapper[4861]: I0129 08:34:31.969816 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r7d8j"
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.069352 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-catalog-content\") pod \"2b6cfc10-26c7-43a7-961d-a7444508d005\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") "
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.069656 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t57wq\" (UniqueName: \"kubernetes.io/projected/2b6cfc10-26c7-43a7-961d-a7444508d005-kube-api-access-t57wq\") pod \"2b6cfc10-26c7-43a7-961d-a7444508d005\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") "
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.069918 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-utilities\") pod \"2b6cfc10-26c7-43a7-961d-a7444508d005\" (UID: \"2b6cfc10-26c7-43a7-961d-a7444508d005\") "
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.072268 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-utilities" (OuterVolumeSpecName: "utilities") pod "2b6cfc10-26c7-43a7-961d-a7444508d005" (UID: "2b6cfc10-26c7-43a7-961d-a7444508d005"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.080584 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b6cfc10-26c7-43a7-961d-a7444508d005-kube-api-access-t57wq" (OuterVolumeSpecName: "kube-api-access-t57wq") pod "2b6cfc10-26c7-43a7-961d-a7444508d005" (UID: "2b6cfc10-26c7-43a7-961d-a7444508d005"). InnerVolumeSpecName "kube-api-access-t57wq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.100244 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2b6cfc10-26c7-43a7-961d-a7444508d005" (UID: "2b6cfc10-26c7-43a7-961d-a7444508d005"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.174508 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t57wq\" (UniqueName: \"kubernetes.io/projected/2b6cfc10-26c7-43a7-961d-a7444508d005-kube-api-access-t57wq\") on node \"crc\" DevicePath \"\""
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.174549 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.174562 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b6cfc10-26c7-43a7-961d-a7444508d005-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.928370 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r7d8j"
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.963862 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r7d8j"]
Jan 29 08:34:32 crc kubenswrapper[4861]: I0129 08:34:32.971997 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-r7d8j"]
Jan 29 08:34:33 crc kubenswrapper[4861]: I0129 08:34:33.127725 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b6cfc10-26c7-43a7-961d-a7444508d005" path="/var/lib/kubelet/pods/2b6cfc10-26c7-43a7-961d-a7444508d005/volumes"
Jan 29 08:34:39 crc kubenswrapper[4861]: I0129 08:34:39.123552 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:34:39 crc kubenswrapper[4861]: E0129 08:34:39.124378 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:34:52 crc kubenswrapper[4861]: I0129 08:34:52.117165 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:34:52 crc kubenswrapper[4861]: E0129 08:34:52.117985 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:35:06 crc kubenswrapper[4861]: I0129 08:35:06.116352 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:35:06 crc kubenswrapper[4861]: E0129 08:35:06.117159 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:35:17 crc kubenswrapper[4861]: I0129 08:35:17.117051 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:35:17 crc kubenswrapper[4861]: E0129 08:35:17.117904 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:35:29 crc kubenswrapper[4861]: I0129 08:35:29.124686 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:35:29 crc kubenswrapper[4861]: E0129 08:35:29.125553 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:35:42 crc kubenswrapper[4861]: I0129 08:35:42.116574 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:35:42 crc kubenswrapper[4861]: E0129 08:35:42.117361 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:35:53 crc kubenswrapper[4861]: I0129 08:35:53.118623 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:35:53 crc kubenswrapper[4861]: E0129 08:35:53.119933 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:36:06 crc kubenswrapper[4861]: I0129 08:36:06.117141 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:36:06 crc kubenswrapper[4861]: E0129 08:36:06.119053 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:36:18 crc kubenswrapper[4861]: I0129 08:36:18.116758 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:36:18 crc kubenswrapper[4861]: E0129 08:36:18.118657 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:36:33 crc kubenswrapper[4861]: I0129 08:36:33.117026 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:36:44 crc kubenswrapper[4861]: I0129 08:36:44.117213 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:36:44 crc kubenswrapper[4861]: E0129 08:36:44.118058 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:36:54 crc kubenswrapper[4861]: I0129 08:36:54.277720 4861 generic.go:334] "Generic (PLEG): container finished" podID="17876c4e-e24a-477b-86bf-aa99c0ae2803" containerID="078eb0fcd7c1c541f53cbadecd1efbe507c85e3454b158b8ae01c764598deac3" exitCode=0 Jan 29 08:36:54 crc kubenswrapper[4861]: I0129 08:36:54.277836 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" event={"ID":"17876c4e-e24a-477b-86bf-aa99c0ae2803","Type":"ContainerDied","Data":"078eb0fcd7c1c541f53cbadecd1efbe507c85e3454b158b8ae01c764598deac3"} Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.116555 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:36:55 crc kubenswrapper[4861]: E0129 08:36:55.117203 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.723008 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.821458 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-bootstrap-combined-ca-bundle\") pod \"17876c4e-e24a-477b-86bf-aa99c0ae2803\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.821636 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-ssh-key-openstack-cell1\") pod \"17876c4e-e24a-477b-86bf-aa99c0ae2803\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.821674 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-inventory\") pod \"17876c4e-e24a-477b-86bf-aa99c0ae2803\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.821719 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlns2\" (UniqueName: \"kubernetes.io/projected/17876c4e-e24a-477b-86bf-aa99c0ae2803-kube-api-access-hlns2\") pod \"17876c4e-e24a-477b-86bf-aa99c0ae2803\" (UID: \"17876c4e-e24a-477b-86bf-aa99c0ae2803\") " Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.826670 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "17876c4e-e24a-477b-86bf-aa99c0ae2803" (UID: "17876c4e-e24a-477b-86bf-aa99c0ae2803"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.827240 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17876c4e-e24a-477b-86bf-aa99c0ae2803-kube-api-access-hlns2" (OuterVolumeSpecName: "kube-api-access-hlns2") pod "17876c4e-e24a-477b-86bf-aa99c0ae2803" (UID: "17876c4e-e24a-477b-86bf-aa99c0ae2803"). InnerVolumeSpecName "kube-api-access-hlns2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.848864 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-inventory" (OuterVolumeSpecName: "inventory") pod "17876c4e-e24a-477b-86bf-aa99c0ae2803" (UID: "17876c4e-e24a-477b-86bf-aa99c0ae2803"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.851244 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "17876c4e-e24a-477b-86bf-aa99c0ae2803" (UID: "17876c4e-e24a-477b-86bf-aa99c0ae2803"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.924343 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.924397 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.924412 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlns2\" (UniqueName: \"kubernetes.io/projected/17876c4e-e24a-477b-86bf-aa99c0ae2803-kube-api-access-hlns2\") on node \"crc\" DevicePath \"\"" Jan 29 08:36:55 crc kubenswrapper[4861]: I0129 08:36:55.924427 4861 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17876c4e-e24a-477b-86bf-aa99c0ae2803-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.299831 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" event={"ID":"17876c4e-e24a-477b-86bf-aa99c0ae2803","Type":"ContainerDied","Data":"519e01e498739876f4b1b14e3427fde4223fc1fddaaf1c3a6db45187cd436961"} Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.300169 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="519e01e498739876f4b1b14e3427fde4223fc1fddaaf1c3a6db45187cd436961" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.299919 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-tz2gk" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.379005 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-l99sq"] Jan 29 08:36:56 crc kubenswrapper[4861]: E0129 08:36:56.379460 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17876c4e-e24a-477b-86bf-aa99c0ae2803" containerName="bootstrap-openstack-openstack-cell1" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.379478 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="17876c4e-e24a-477b-86bf-aa99c0ae2803" containerName="bootstrap-openstack-openstack-cell1" Jan 29 08:36:56 crc kubenswrapper[4861]: E0129 08:36:56.379488 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b6cfc10-26c7-43a7-961d-a7444508d005" containerName="extract-content" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.379495 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b6cfc10-26c7-43a7-961d-a7444508d005" containerName="extract-content" Jan 29 08:36:56 crc kubenswrapper[4861]: E0129 08:36:56.379512 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b6cfc10-26c7-43a7-961d-a7444508d005" containerName="registry-server" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.379518 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b6cfc10-26c7-43a7-961d-a7444508d005" containerName="registry-server" Jan 29 08:36:56 crc kubenswrapper[4861]: E0129 08:36:56.379541 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b6cfc10-26c7-43a7-961d-a7444508d005" containerName="extract-utilities" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.379547 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b6cfc10-26c7-43a7-961d-a7444508d005" containerName="extract-utilities" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.379734 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b6cfc10-26c7-43a7-961d-a7444508d005" containerName="registry-server" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.379749 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="17876c4e-e24a-477b-86bf-aa99c0ae2803" containerName="bootstrap-openstack-openstack-cell1" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.399976 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.402257 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.402285 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.402519 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.402689 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.408426 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-l99sq"] Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.540270 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-ssh-key-openstack-cell1\") pod \"download-cache-openstack-openstack-cell1-l99sq\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") " pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.540328 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-inventory\") pod \"download-cache-openstack-openstack-cell1-l99sq\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") " pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.540488 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgtn6\" (UniqueName: \"kubernetes.io/projected/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-kube-api-access-cgtn6\") pod \"download-cache-openstack-openstack-cell1-l99sq\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") " pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.642408 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-ssh-key-openstack-cell1\") pod \"download-cache-openstack-openstack-cell1-l99sq\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") " pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.642455 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-inventory\") pod \"download-cache-openstack-openstack-cell1-l99sq\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") " pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.642510 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgtn6\" (UniqueName: \"kubernetes.io/projected/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-kube-api-access-cgtn6\") pod \"download-cache-openstack-openstack-cell1-l99sq\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") " 
pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.646535 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-ssh-key-openstack-cell1\") pod \"download-cache-openstack-openstack-cell1-l99sq\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") " pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.646646 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-inventory\") pod \"download-cache-openstack-openstack-cell1-l99sq\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") " pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.659307 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgtn6\" (UniqueName: \"kubernetes.io/projected/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-kube-api-access-cgtn6\") pod \"download-cache-openstack-openstack-cell1-l99sq\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") " pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:36:56 crc kubenswrapper[4861]: I0129 08:36:56.719714 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:36:57 crc kubenswrapper[4861]: I0129 08:36:57.264676 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-l99sq"] Jan 29 08:36:57 crc kubenswrapper[4861]: I0129 08:36:57.312438 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-l99sq" event={"ID":"4ab2f092-f6e2-4db8-b5ac-bfb83fada584","Type":"ContainerStarted","Data":"94551d5a065c0bd7979fdf1ad2d685bccf4571db3894abd358243e64eadd437a"} Jan 29 08:36:58 crc kubenswrapper[4861]: I0129 08:36:58.328771 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-l99sq" event={"ID":"4ab2f092-f6e2-4db8-b5ac-bfb83fada584","Type":"ContainerStarted","Data":"4b9fbc478d5018a1a8944b8a2bc60ff45aff363994e7d69ac5c286ffe182c641"} Jan 29 08:36:58 crc kubenswrapper[4861]: I0129 08:36:58.352548 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-openstack-openstack-cell1-l99sq" podStartSLOduration=1.7348586080000001 podStartE2EDuration="2.352516544s" podCreationTimestamp="2026-01-29 08:36:56 +0000 UTC" firstStartedPulling="2026-01-29 08:36:57.281612222 +0000 UTC m=+7308.953106789" lastFinishedPulling="2026-01-29 08:36:57.899270178 +0000 UTC m=+7309.570764725" observedRunningTime="2026-01-29 08:36:58.349494964 +0000 UTC m=+7310.020989521" watchObservedRunningTime="2026-01-29 08:36:58.352516544 +0000 UTC m=+7310.024011111" Jan 29 08:37:10 crc kubenswrapper[4861]: I0129 08:37:10.116780 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:37:10 crc kubenswrapper[4861]: E0129 08:37:10.117619 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Jan 29 08:37:10 crc kubenswrapper[4861]: E0129 08:37:10.117619 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.169647 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lj58f"]
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.172926 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lj58f"
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.181997 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lj58f"]
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.294936 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-utilities\") pod \"community-operators-lj58f\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " pod="openshift-marketplace/community-operators-lj58f"
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.295119 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d7pn\" (UniqueName: \"kubernetes.io/projected/f4740371-a108-4477-9f3e-331a56bed8aa-kube-api-access-8d7pn\") pod \"community-operators-lj58f\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " pod="openshift-marketplace/community-operators-lj58f"
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.295209 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-catalog-content\") pod \"community-operators-lj58f\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " pod="openshift-marketplace/community-operators-lj58f"
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.397942 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-utilities\") pod \"community-operators-lj58f\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " pod="openshift-marketplace/community-operators-lj58f"
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.398168 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d7pn\" (UniqueName: \"kubernetes.io/projected/f4740371-a108-4477-9f3e-331a56bed8aa-kube-api-access-8d7pn\") pod \"community-operators-lj58f\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " pod="openshift-marketplace/community-operators-lj58f"
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.398261 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-catalog-content\") pod \"community-operators-lj58f\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " pod="openshift-marketplace/community-operators-lj58f"
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.398526 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-utilities\") pod \"community-operators-lj58f\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " pod="openshift-marketplace/community-operators-lj58f"
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.398683 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-catalog-content\") pod \"community-operators-lj58f\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " pod="openshift-marketplace/community-operators-lj58f"
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.418457 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d7pn\" (UniqueName: \"kubernetes.io/projected/f4740371-a108-4477-9f3e-331a56bed8aa-kube-api-access-8d7pn\") pod \"community-operators-lj58f\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " pod="openshift-marketplace/community-operators-lj58f"
Jan 29 08:37:22 crc kubenswrapper[4861]: I0129 08:37:22.495772 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lj58f"
Jan 29 08:37:23 crc kubenswrapper[4861]: I0129 08:37:23.118041 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:37:23 crc kubenswrapper[4861]: E0129 08:37:23.118746 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:37:23 crc kubenswrapper[4861]: I0129 08:37:23.164523 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lj58f"]
Jan 29 08:37:23 crc kubenswrapper[4861]: I0129 08:37:23.554639 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4740371-a108-4477-9f3e-331a56bed8aa" containerID="0596f440336857214793e9d5b2ff504d4f1923d1c88feff8808f4789dc75b0c1" exitCode=0
Jan 29 08:37:23 crc kubenswrapper[4861]: I0129 08:37:23.555025 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj58f" event={"ID":"f4740371-a108-4477-9f3e-331a56bed8aa","Type":"ContainerDied","Data":"0596f440336857214793e9d5b2ff504d4f1923d1c88feff8808f4789dc75b0c1"}
Jan 29 08:37:23 crc kubenswrapper[4861]: I0129 08:37:23.555240 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj58f" event={"ID":"f4740371-a108-4477-9f3e-331a56bed8aa","Type":"ContainerStarted","Data":"8a27f3e3c4cd130b0902edc6d4803d1c82fad23b05fcb309e297d378a7b4cd48"}
Jan 29 08:37:25 crc kubenswrapper[4861]: I0129 08:37:25.574386 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj58f" event={"ID":"f4740371-a108-4477-9f3e-331a56bed8aa","Type":"ContainerStarted","Data":"95da3f7324f19c85a7bfdd16e2ef6b1e33d147f80b69ea68fbc8eeb23492009f"}
Jan 29 08:37:28 crc kubenswrapper[4861]: I0129 08:37:28.603767 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4740371-a108-4477-9f3e-331a56bed8aa" containerID="95da3f7324f19c85a7bfdd16e2ef6b1e33d147f80b69ea68fbc8eeb23492009f" exitCode=0
event={"ID":"f4740371-a108-4477-9f3e-331a56bed8aa","Type":"ContainerDied","Data":"95da3f7324f19c85a7bfdd16e2ef6b1e33d147f80b69ea68fbc8eeb23492009f"} Jan 29 08:37:30 crc kubenswrapper[4861]: I0129 08:37:30.623838 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj58f" event={"ID":"f4740371-a108-4477-9f3e-331a56bed8aa","Type":"ContainerStarted","Data":"d952670ef287917ee3202b65f25023f5fa897c8a0c2f59c0b10f7c7f156928f5"} Jan 29 08:37:30 crc kubenswrapper[4861]: I0129 08:37:30.649391 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lj58f" podStartSLOduration=2.5505829330000003 podStartE2EDuration="8.649371912s" podCreationTimestamp="2026-01-29 08:37:22 +0000 UTC" firstStartedPulling="2026-01-29 08:37:23.556787506 +0000 UTC m=+7335.228282053" lastFinishedPulling="2026-01-29 08:37:29.655576435 +0000 UTC m=+7341.327071032" observedRunningTime="2026-01-29 08:37:30.640519576 +0000 UTC m=+7342.312014153" watchObservedRunningTime="2026-01-29 08:37:30.649371912 +0000 UTC m=+7342.320866469" Jan 29 08:37:32 crc kubenswrapper[4861]: I0129 08:37:32.496526 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lj58f" Jan 29 08:37:32 crc kubenswrapper[4861]: I0129 08:37:32.496817 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lj58f" Jan 29 08:37:32 crc kubenswrapper[4861]: I0129 08:37:32.551956 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lj58f" Jan 29 08:37:34 crc kubenswrapper[4861]: I0129 08:37:34.117047 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:37:34 crc kubenswrapper[4861]: E0129 08:37:34.118008 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:37:42 crc kubenswrapper[4861]: I0129 08:37:42.550027 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lj58f" Jan 29 08:37:42 crc kubenswrapper[4861]: I0129 08:37:42.597988 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lj58f"] Jan 29 08:37:42 crc kubenswrapper[4861]: I0129 08:37:42.732678 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lj58f" podUID="f4740371-a108-4477-9f3e-331a56bed8aa" containerName="registry-server" containerID="cri-o://d952670ef287917ee3202b65f25023f5fa897c8a0c2f59c0b10f7c7f156928f5" gracePeriod=2 Jan 29 08:37:43 crc kubenswrapper[4861]: I0129 08:37:43.747491 4861 generic.go:334] "Generic (PLEG): container finished" podID="f4740371-a108-4477-9f3e-331a56bed8aa" containerID="d952670ef287917ee3202b65f25023f5fa897c8a0c2f59c0b10f7c7f156928f5" exitCode=0 Jan 29 08:37:43 crc kubenswrapper[4861]: I0129 08:37:43.747551 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj58f" 
event={"ID":"f4740371-a108-4477-9f3e-331a56bed8aa","Type":"ContainerDied","Data":"d952670ef287917ee3202b65f25023f5fa897c8a0c2f59c0b10f7c7f156928f5"} Jan 29 08:37:43 crc kubenswrapper[4861]: I0129 08:37:43.890698 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lj58f" Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.084021 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-catalog-content\") pod \"f4740371-a108-4477-9f3e-331a56bed8aa\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.084451 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-utilities\") pod \"f4740371-a108-4477-9f3e-331a56bed8aa\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.084520 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8d7pn\" (UniqueName: \"kubernetes.io/projected/f4740371-a108-4477-9f3e-331a56bed8aa-kube-api-access-8d7pn\") pod \"f4740371-a108-4477-9f3e-331a56bed8aa\" (UID: \"f4740371-a108-4477-9f3e-331a56bed8aa\") " Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.085390 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-utilities" (OuterVolumeSpecName: "utilities") pod "f4740371-a108-4477-9f3e-331a56bed8aa" (UID: "f4740371-a108-4477-9f3e-331a56bed8aa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.089888 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4740371-a108-4477-9f3e-331a56bed8aa-kube-api-access-8d7pn" (OuterVolumeSpecName: "kube-api-access-8d7pn") pod "f4740371-a108-4477-9f3e-331a56bed8aa" (UID: "f4740371-a108-4477-9f3e-331a56bed8aa"). InnerVolumeSpecName "kube-api-access-8d7pn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.134240 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f4740371-a108-4477-9f3e-331a56bed8aa" (UID: "f4740371-a108-4477-9f3e-331a56bed8aa"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.186778 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.186825 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4740371-a108-4477-9f3e-331a56bed8aa-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.186838 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8d7pn\" (UniqueName: \"kubernetes.io/projected/f4740371-a108-4477-9f3e-331a56bed8aa-kube-api-access-8d7pn\") on node \"crc\" DevicePath \"\"" Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.762397 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lj58f" event={"ID":"f4740371-a108-4477-9f3e-331a56bed8aa","Type":"ContainerDied","Data":"8a27f3e3c4cd130b0902edc6d4803d1c82fad23b05fcb309e297d378a7b4cd48"} Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.762444 4861 scope.go:117] "RemoveContainer" containerID="d952670ef287917ee3202b65f25023f5fa897c8a0c2f59c0b10f7c7f156928f5" Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.762465 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lj58f" Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.790148 4861 scope.go:117] "RemoveContainer" containerID="95da3f7324f19c85a7bfdd16e2ef6b1e33d147f80b69ea68fbc8eeb23492009f" Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.815765 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lj58f"] Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.824320 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lj58f"] Jan 29 08:37:44 crc kubenswrapper[4861]: I0129 08:37:44.825760 4861 scope.go:117] "RemoveContainer" containerID="0596f440336857214793e9d5b2ff504d4f1923d1c88feff8808f4789dc75b0c1" Jan 29 08:37:45 crc kubenswrapper[4861]: I0129 08:37:45.128308 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4740371-a108-4477-9f3e-331a56bed8aa" path="/var/lib/kubelet/pods/f4740371-a108-4477-9f3e-331a56bed8aa/volumes" Jan 29 08:37:47 crc kubenswrapper[4861]: I0129 08:37:47.117272 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:37:47 crc kubenswrapper[4861]: E0129 08:37:47.117796 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:37:58 crc kubenswrapper[4861]: I0129 08:37:58.116188 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751" Jan 29 08:37:58 crc kubenswrapper[4861]: E0129 08:37:58.117233 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
Jan 29 08:37:58 crc kubenswrapper[4861]: E0129 08:37:58.117233 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:38:11 crc kubenswrapper[4861]: I0129 08:38:11.116726 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:38:11 crc kubenswrapper[4861]: I0129 08:38:11.998296 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"5a4be9b1e7d970af3d8f175e27e26eae76f837f496a6c2d96c5fa17cc090c87a"}
Jan 29 08:38:28 crc kubenswrapper[4861]: I0129 08:38:28.151348 4861 generic.go:334] "Generic (PLEG): container finished" podID="4ab2f092-f6e2-4db8-b5ac-bfb83fada584" containerID="4b9fbc478d5018a1a8944b8a2bc60ff45aff363994e7d69ac5c286ffe182c641" exitCode=0
Jan 29 08:38:28 crc kubenswrapper[4861]: I0129 08:38:28.151451 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-l99sq" event={"ID":"4ab2f092-f6e2-4db8-b5ac-bfb83fada584","Type":"ContainerDied","Data":"4b9fbc478d5018a1a8944b8a2bc60ff45aff363994e7d69ac5c286ffe182c641"}
Jan 29 08:38:29 crc kubenswrapper[4861]: I0129 08:38:29.675251 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-l99sq"
Jan 29 08:38:29 crc kubenswrapper[4861]: I0129 08:38:29.849312 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-inventory\") pod \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") "
Jan 29 08:38:29 crc kubenswrapper[4861]: I0129 08:38:29.849634 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-ssh-key-openstack-cell1\") pod \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") "
Jan 29 08:38:29 crc kubenswrapper[4861]: I0129 08:38:29.850253 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgtn6\" (UniqueName: \"kubernetes.io/projected/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-kube-api-access-cgtn6\") pod \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\" (UID: \"4ab2f092-f6e2-4db8-b5ac-bfb83fada584\") "
Jan 29 08:38:29 crc kubenswrapper[4861]: I0129 08:38:29.856302 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-kube-api-access-cgtn6" (OuterVolumeSpecName: "kube-api-access-cgtn6") pod "4ab2f092-f6e2-4db8-b5ac-bfb83fada584" (UID: "4ab2f092-f6e2-4db8-b5ac-bfb83fada584"). InnerVolumeSpecName "kube-api-access-cgtn6". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:38:29 crc kubenswrapper[4861]: I0129 08:38:29.890399 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "4ab2f092-f6e2-4db8-b5ac-bfb83fada584" (UID: "4ab2f092-f6e2-4db8-b5ac-bfb83fada584"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:38:29 crc kubenswrapper[4861]: I0129 08:38:29.952493 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:38:29 crc kubenswrapper[4861]: I0129 08:38:29.952729 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgtn6\" (UniqueName: \"kubernetes.io/projected/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-kube-api-access-cgtn6\") on node \"crc\" DevicePath \"\"" Jan 29 08:38:29 crc kubenswrapper[4861]: I0129 08:38:29.952850 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4ab2f092-f6e2-4db8-b5ac-bfb83fada584-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.170177 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-l99sq" event={"ID":"4ab2f092-f6e2-4db8-b5ac-bfb83fada584","Type":"ContainerDied","Data":"94551d5a065c0bd7979fdf1ad2d685bccf4571db3894abd358243e64eadd437a"} Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.170216 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94551d5a065c0bd7979fdf1ad2d685bccf4571db3894abd358243e64eadd437a" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.170248 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-l99sq" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.261800 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-l5f5v"] Jan 29 08:38:30 crc kubenswrapper[4861]: E0129 08:38:30.262469 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4740371-a108-4477-9f3e-331a56bed8aa" containerName="extract-utilities" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.262607 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4740371-a108-4477-9f3e-331a56bed8aa" containerName="extract-utilities" Jan 29 08:38:30 crc kubenswrapper[4861]: E0129 08:38:30.262711 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ab2f092-f6e2-4db8-b5ac-bfb83fada584" containerName="download-cache-openstack-openstack-cell1" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.262790 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ab2f092-f6e2-4db8-b5ac-bfb83fada584" containerName="download-cache-openstack-openstack-cell1" Jan 29 08:38:30 crc kubenswrapper[4861]: E0129 08:38:30.262876 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4740371-a108-4477-9f3e-331a56bed8aa" containerName="extract-content" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.262955 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4740371-a108-4477-9f3e-331a56bed8aa" containerName="extract-content" Jan 29 08:38:30 crc kubenswrapper[4861]: E0129 08:38:30.263043 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4740371-a108-4477-9f3e-331a56bed8aa" containerName="registry-server" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.263139 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4740371-a108-4477-9f3e-331a56bed8aa" containerName="registry-server" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.263478 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ab2f092-f6e2-4db8-b5ac-bfb83fada584" containerName="download-cache-openstack-openstack-cell1" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.263563 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4740371-a108-4477-9f3e-331a56bed8aa" containerName="registry-server" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.264557 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.275574 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.275892 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.276037 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.276664 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.281724 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-l5f5v"] Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.378393 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-inventory\") pod \"configure-network-openstack-openstack-cell1-l5f5v\" (UID: \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.378793 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpgs2\" (UniqueName: \"kubernetes.io/projected/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-kube-api-access-fpgs2\") pod \"configure-network-openstack-openstack-cell1-l5f5v\" (UID: \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.379545 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-ssh-key-openstack-cell1\") pod \"configure-network-openstack-openstack-cell1-l5f5v\" (UID: \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.481708 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-inventory\") pod \"configure-network-openstack-openstack-cell1-l5f5v\" (UID: \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.482026 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpgs2\" (UniqueName: \"kubernetes.io/projected/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-kube-api-access-fpgs2\") pod \"configure-network-openstack-openstack-cell1-l5f5v\" (UID: \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.482058 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-ssh-key-openstack-cell1\") pod \"configure-network-openstack-openstack-cell1-l5f5v\" (UID: 
\"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.485943 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-ssh-key-openstack-cell1\") pod \"configure-network-openstack-openstack-cell1-l5f5v\" (UID: \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.486051 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-inventory\") pod \"configure-network-openstack-openstack-cell1-l5f5v\" (UID: \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.502735 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpgs2\" (UniqueName: \"kubernetes.io/projected/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-kube-api-access-fpgs2\") pod \"configure-network-openstack-openstack-cell1-l5f5v\" (UID: \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:38:30 crc kubenswrapper[4861]: I0129 08:38:30.594569 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:38:31 crc kubenswrapper[4861]: W0129 08:38:31.122659 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc61e4b86_43cc_4b96_85dc_74a0d969b4e5.slice/crio-8d14e738244067f173ae330fff3becf104c05ebf297be8a448108c4ee27ad3e4 WatchSource:0}: Error finding container 8d14e738244067f173ae330fff3becf104c05ebf297be8a448108c4ee27ad3e4: Status 404 returned error can't find the container with id 8d14e738244067f173ae330fff3becf104c05ebf297be8a448108c4ee27ad3e4 Jan 29 08:38:31 crc kubenswrapper[4861]: I0129 08:38:31.128838 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-l5f5v"] Jan 29 08:38:31 crc kubenswrapper[4861]: I0129 08:38:31.179803 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" event={"ID":"c61e4b86-43cc-4b96-85dc-74a0d969b4e5","Type":"ContainerStarted","Data":"8d14e738244067f173ae330fff3becf104c05ebf297be8a448108c4ee27ad3e4"} Jan 29 08:38:32 crc kubenswrapper[4861]: I0129 08:38:32.192419 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" event={"ID":"c61e4b86-43cc-4b96-85dc-74a0d969b4e5","Type":"ContainerStarted","Data":"a6dace1d6907d87d47408f57c301f484572bbf89035445bde06a9870dbf757a8"} Jan 29 08:38:32 crc kubenswrapper[4861]: I0129 08:38:32.218285 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" podStartSLOduration=1.788352731 podStartE2EDuration="2.218245277s" podCreationTimestamp="2026-01-29 08:38:30 +0000 UTC" firstStartedPulling="2026-01-29 08:38:31.125846048 +0000 UTC m=+7402.797340605" lastFinishedPulling="2026-01-29 08:38:31.555738594 +0000 UTC m=+7403.227233151" observedRunningTime="2026-01-29 08:38:32.216142271 +0000 UTC 
m=+7403.887636828" watchObservedRunningTime="2026-01-29 08:38:32.218245277 +0000 UTC m=+7403.889739844" Jan 29 08:39:52 crc kubenswrapper[4861]: I0129 08:39:52.108705 4861 generic.go:334] "Generic (PLEG): container finished" podID="c61e4b86-43cc-4b96-85dc-74a0d969b4e5" containerID="a6dace1d6907d87d47408f57c301f484572bbf89035445bde06a9870dbf757a8" exitCode=0 Jan 29 08:39:52 crc kubenswrapper[4861]: I0129 08:39:52.109247 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" event={"ID":"c61e4b86-43cc-4b96-85dc-74a0d969b4e5","Type":"ContainerDied","Data":"a6dace1d6907d87d47408f57c301f484572bbf89035445bde06a9870dbf757a8"} Jan 29 08:39:53 crc kubenswrapper[4861]: I0129 08:39:53.561282 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:39:53 crc kubenswrapper[4861]: I0129 08:39:53.724997 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-inventory\") pod \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\" (UID: \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " Jan 29 08:39:53 crc kubenswrapper[4861]: I0129 08:39:53.725346 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpgs2\" (UniqueName: \"kubernetes.io/projected/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-kube-api-access-fpgs2\") pod \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\" (UID: \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " Jan 29 08:39:53 crc kubenswrapper[4861]: I0129 08:39:53.725519 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-ssh-key-openstack-cell1\") pod \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\" (UID: \"c61e4b86-43cc-4b96-85dc-74a0d969b4e5\") " Jan 29 08:39:53 crc kubenswrapper[4861]: I0129 08:39:53.737087 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-kube-api-access-fpgs2" (OuterVolumeSpecName: "kube-api-access-fpgs2") pod "c61e4b86-43cc-4b96-85dc-74a0d969b4e5" (UID: "c61e4b86-43cc-4b96-85dc-74a0d969b4e5"). InnerVolumeSpecName "kube-api-access-fpgs2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:39:53 crc kubenswrapper[4861]: I0129 08:39:53.758696 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "c61e4b86-43cc-4b96-85dc-74a0d969b4e5" (UID: "c61e4b86-43cc-4b96-85dc-74a0d969b4e5"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:39:53 crc kubenswrapper[4861]: I0129 08:39:53.758886 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-inventory" (OuterVolumeSpecName: "inventory") pod "c61e4b86-43cc-4b96-85dc-74a0d969b4e5" (UID: "c61e4b86-43cc-4b96-85dc-74a0d969b4e5"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:39:53 crc kubenswrapper[4861]: I0129 08:39:53.829701 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:39:53 crc kubenswrapper[4861]: I0129 08:39:53.829759 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpgs2\" (UniqueName: \"kubernetes.io/projected/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-kube-api-access-fpgs2\") on node \"crc\" DevicePath \"\"" Jan 29 08:39:53 crc kubenswrapper[4861]: I0129 08:39:53.829781 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c61e4b86-43cc-4b96-85dc-74a0d969b4e5-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.130344 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" event={"ID":"c61e4b86-43cc-4b96-85dc-74a0d969b4e5","Type":"ContainerDied","Data":"8d14e738244067f173ae330fff3becf104c05ebf297be8a448108c4ee27ad3e4"} Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.130384 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d14e738244067f173ae330fff3becf104c05ebf297be8a448108c4ee27ad3e4" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.130401 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-l5f5v" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.226117 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-8strq"] Jan 29 08:39:54 crc kubenswrapper[4861]: E0129 08:39:54.226595 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c61e4b86-43cc-4b96-85dc-74a0d969b4e5" containerName="configure-network-openstack-openstack-cell1" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.226613 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c61e4b86-43cc-4b96-85dc-74a0d969b4e5" containerName="configure-network-openstack-openstack-cell1" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.226790 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c61e4b86-43cc-4b96-85dc-74a0d969b4e5" containerName="configure-network-openstack-openstack-cell1" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.227520 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.229566 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.229639 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.229827 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.229885 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.239429 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-8strq"] Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.337991 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-inventory\") pod \"validate-network-openstack-openstack-cell1-8strq\" (UID: \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.338203 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqgp7\" (UniqueName: \"kubernetes.io/projected/9b1d958c-687e-4686-b8ac-87ea982bb0d9-kube-api-access-vqgp7\") pod \"validate-network-openstack-openstack-cell1-8strq\" (UID: \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.338269 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-ssh-key-openstack-cell1\") pod \"validate-network-openstack-openstack-cell1-8strq\" (UID: \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.441529 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-inventory\") pod \"validate-network-openstack-openstack-cell1-8strq\" (UID: \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.441723 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqgp7\" (UniqueName: \"kubernetes.io/projected/9b1d958c-687e-4686-b8ac-87ea982bb0d9-kube-api-access-vqgp7\") pod \"validate-network-openstack-openstack-cell1-8strq\" (UID: \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.441796 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-ssh-key-openstack-cell1\") pod \"validate-network-openstack-openstack-cell1-8strq\" (UID: 
\"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.448598 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-inventory\") pod \"validate-network-openstack-openstack-cell1-8strq\" (UID: \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.452801 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-ssh-key-openstack-cell1\") pod \"validate-network-openstack-openstack-cell1-8strq\" (UID: \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.460778 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqgp7\" (UniqueName: \"kubernetes.io/projected/9b1d958c-687e-4686-b8ac-87ea982bb0d9-kube-api-access-vqgp7\") pod \"validate-network-openstack-openstack-cell1-8strq\" (UID: \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:39:54 crc kubenswrapper[4861]: I0129 08:39:54.545494 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:39:55 crc kubenswrapper[4861]: I0129 08:39:55.155007 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-8strq"] Jan 29 08:39:55 crc kubenswrapper[4861]: I0129 08:39:55.165996 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 08:39:56 crc kubenswrapper[4861]: I0129 08:39:56.164993 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-8strq" event={"ID":"9b1d958c-687e-4686-b8ac-87ea982bb0d9","Type":"ContainerStarted","Data":"6f10d069e44a5c445c32337b5c8d3df1722fc02c1296a96a7b895f5c5cbaf699"} Jan 29 08:39:56 crc kubenswrapper[4861]: I0129 08:39:56.166092 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-8strq" event={"ID":"9b1d958c-687e-4686-b8ac-87ea982bb0d9","Type":"ContainerStarted","Data":"1d82bceea13675e413b4efe8b3f39a2d4416dce6e8e8865c76f79560877e5f5e"} Jan 29 08:39:56 crc kubenswrapper[4861]: I0129 08:39:56.188426 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-openstack-openstack-cell1-8strq" podStartSLOduration=1.7677888080000002 podStartE2EDuration="2.188407787s" podCreationTimestamp="2026-01-29 08:39:54 +0000 UTC" firstStartedPulling="2026-01-29 08:39:55.165772573 +0000 UTC m=+7486.837267130" lastFinishedPulling="2026-01-29 08:39:55.586391552 +0000 UTC m=+7487.257886109" observedRunningTime="2026-01-29 08:39:56.184426051 +0000 UTC m=+7487.855920608" watchObservedRunningTime="2026-01-29 08:39:56.188407787 +0000 UTC m=+7487.859902344" Jan 29 08:40:01 crc kubenswrapper[4861]: I0129 08:40:01.216472 4861 generic.go:334] "Generic (PLEG): container finished" podID="9b1d958c-687e-4686-b8ac-87ea982bb0d9" containerID="6f10d069e44a5c445c32337b5c8d3df1722fc02c1296a96a7b895f5c5cbaf699" exitCode=0 Jan 29 08:40:01 crc 
kubenswrapper[4861]: I0129 08:40:01.217011 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-8strq" event={"ID":"9b1d958c-687e-4686-b8ac-87ea982bb0d9","Type":"ContainerDied","Data":"6f10d069e44a5c445c32337b5c8d3df1722fc02c1296a96a7b895f5c5cbaf699"} Jan 29 08:40:02 crc kubenswrapper[4861]: I0129 08:40:02.659824 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:40:02 crc kubenswrapper[4861]: I0129 08:40:02.765876 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-inventory\") pod \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\" (UID: \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " Jan 29 08:40:02 crc kubenswrapper[4861]: I0129 08:40:02.766034 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-ssh-key-openstack-cell1\") pod \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\" (UID: \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " Jan 29 08:40:02 crc kubenswrapper[4861]: I0129 08:40:02.766226 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqgp7\" (UniqueName: \"kubernetes.io/projected/9b1d958c-687e-4686-b8ac-87ea982bb0d9-kube-api-access-vqgp7\") pod \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\" (UID: \"9b1d958c-687e-4686-b8ac-87ea982bb0d9\") " Jan 29 08:40:02 crc kubenswrapper[4861]: I0129 08:40:02.786545 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b1d958c-687e-4686-b8ac-87ea982bb0d9-kube-api-access-vqgp7" (OuterVolumeSpecName: "kube-api-access-vqgp7") pod "9b1d958c-687e-4686-b8ac-87ea982bb0d9" (UID: "9b1d958c-687e-4686-b8ac-87ea982bb0d9"). InnerVolumeSpecName "kube-api-access-vqgp7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:40:02 crc kubenswrapper[4861]: I0129 08:40:02.801490 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-inventory" (OuterVolumeSpecName: "inventory") pod "9b1d958c-687e-4686-b8ac-87ea982bb0d9" (UID: "9b1d958c-687e-4686-b8ac-87ea982bb0d9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:40:02 crc kubenswrapper[4861]: I0129 08:40:02.803128 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "9b1d958c-687e-4686-b8ac-87ea982bb0d9" (UID: "9b1d958c-687e-4686-b8ac-87ea982bb0d9"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:40:02 crc kubenswrapper[4861]: I0129 08:40:02.872179 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:40:02 crc kubenswrapper[4861]: I0129 08:40:02.872214 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqgp7\" (UniqueName: \"kubernetes.io/projected/9b1d958c-687e-4686-b8ac-87ea982bb0d9-kube-api-access-vqgp7\") on node \"crc\" DevicePath \"\"" Jan 29 08:40:02 crc kubenswrapper[4861]: I0129 08:40:02.872226 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b1d958c-687e-4686-b8ac-87ea982bb0d9-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.239330 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-8strq" event={"ID":"9b1d958c-687e-4686-b8ac-87ea982bb0d9","Type":"ContainerDied","Data":"1d82bceea13675e413b4efe8b3f39a2d4416dce6e8e8865c76f79560877e5f5e"} Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.239605 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d82bceea13675e413b4efe8b3f39a2d4416dce6e8e8865c76f79560877e5f5e" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.239552 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-8strq" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.419156 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-openstack-openstack-cell1-r2w5q"] Jan 29 08:40:03 crc kubenswrapper[4861]: E0129 08:40:03.419947 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b1d958c-687e-4686-b8ac-87ea982bb0d9" containerName="validate-network-openstack-openstack-cell1" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.419965 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b1d958c-687e-4686-b8ac-87ea982bb0d9" containerName="validate-network-openstack-openstack-cell1" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.420727 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b1d958c-687e-4686-b8ac-87ea982bb0d9" containerName="validate-network-openstack-openstack-cell1" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.426734 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.429733 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.429918 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.443531 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.443854 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.475122 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-r2w5q"] Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.513013 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d5cg\" (UniqueName: \"kubernetes.io/projected/6272a2bc-68af-4bb6-9e65-ba51d3183b87-kube-api-access-5d5cg\") pod \"install-os-openstack-openstack-cell1-r2w5q\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.513225 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-ssh-key-openstack-cell1\") pod \"install-os-openstack-openstack-cell1-r2w5q\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.513307 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-inventory\") pod \"install-os-openstack-openstack-cell1-r2w5q\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.616795 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-ssh-key-openstack-cell1\") pod \"install-os-openstack-openstack-cell1-r2w5q\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.617197 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-inventory\") pod \"install-os-openstack-openstack-cell1-r2w5q\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.617267 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d5cg\" (UniqueName: \"kubernetes.io/projected/6272a2bc-68af-4bb6-9e65-ba51d3183b87-kube-api-access-5d5cg\") pod \"install-os-openstack-openstack-cell1-r2w5q\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 
29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.629994 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-ssh-key-openstack-cell1\") pod \"install-os-openstack-openstack-cell1-r2w5q\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.631839 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-inventory\") pod \"install-os-openstack-openstack-cell1-r2w5q\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.636663 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d5cg\" (UniqueName: \"kubernetes.io/projected/6272a2bc-68af-4bb6-9e65-ba51d3183b87-kube-api-access-5d5cg\") pod \"install-os-openstack-openstack-cell1-r2w5q\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:03 crc kubenswrapper[4861]: I0129 08:40:03.779441 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:04 crc kubenswrapper[4861]: I0129 08:40:04.414142 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-r2w5q"] Jan 29 08:40:05 crc kubenswrapper[4861]: I0129 08:40:05.282475 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-r2w5q" event={"ID":"6272a2bc-68af-4bb6-9e65-ba51d3183b87","Type":"ContainerStarted","Data":"9bdc6c3b457efe2cc89d5af2189dcb9bb6ee5a9e83236cb6147e047f0dad99e9"} Jan 29 08:40:06 crc kubenswrapper[4861]: I0129 08:40:06.296769 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-r2w5q" event={"ID":"6272a2bc-68af-4bb6-9e65-ba51d3183b87","Type":"ContainerStarted","Data":"b7acd4fecc548aaa9c8ef46fc04f78bffbf4a8ae719276ea4b3727fdb8777d3b"} Jan 29 08:40:06 crc kubenswrapper[4861]: I0129 08:40:06.318157 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-openstack-openstack-cell1-r2w5q" podStartSLOduration=2.610966545 podStartE2EDuration="3.318140146s" podCreationTimestamp="2026-01-29 08:40:03 +0000 UTC" firstStartedPulling="2026-01-29 08:40:04.416031437 +0000 UTC m=+7496.087526004" lastFinishedPulling="2026-01-29 08:40:05.123205048 +0000 UTC m=+7496.794699605" observedRunningTime="2026-01-29 08:40:06.315504166 +0000 UTC m=+7497.986998733" watchObservedRunningTime="2026-01-29 08:40:06.318140146 +0000 UTC m=+7497.989634703" Jan 29 08:40:30 crc kubenswrapper[4861]: I0129 08:40:30.630431 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:40:30 crc kubenswrapper[4861]: I0129 08:40:30.630922 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:40:32 crc kubenswrapper[4861]: I0129 08:40:32.719887 4861 scope.go:117] "RemoveContainer" containerID="edb373ffed7168c3c8a2c2d5358a0d7251621860c27be1195cc884f97d2d2cfe" Jan 29 08:40:32 crc kubenswrapper[4861]: I0129 08:40:32.748906 4861 scope.go:117] "RemoveContainer" containerID="3ba282ca35f76844e8efa3dc2a0340aea82ea0c5b63ba940a019bf94519154fd" Jan 29 08:40:32 crc kubenswrapper[4861]: I0129 08:40:32.776001 4861 scope.go:117] "RemoveContainer" containerID="417d316a177d1ebed5bdf5706933af7fafa26c59e987c2f2aa4053b89018b8cc" Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.342198 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2t7mc"] Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.347351 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.364462 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2t7mc"] Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.421795 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-utilities\") pod \"certified-operators-2t7mc\" (UID: \"b1e46956-ff67-4f66-a222-545324fe670a\") " pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.422615 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zr697\" (UniqueName: \"kubernetes.io/projected/b1e46956-ff67-4f66-a222-545324fe670a-kube-api-access-zr697\") pod \"certified-operators-2t7mc\" (UID: \"b1e46956-ff67-4f66-a222-545324fe670a\") " pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.423268 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-catalog-content\") pod \"certified-operators-2t7mc\" (UID: \"b1e46956-ff67-4f66-a222-545324fe670a\") " pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.525334 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-utilities\") pod \"certified-operators-2t7mc\" (UID: \"b1e46956-ff67-4f66-a222-545324fe670a\") " pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.525450 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zr697\" (UniqueName: \"kubernetes.io/projected/b1e46956-ff67-4f66-a222-545324fe670a-kube-api-access-zr697\") pod \"certified-operators-2t7mc\" (UID: \"b1e46956-ff67-4f66-a222-545324fe670a\") " pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.525484 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-catalog-content\") pod \"certified-operators-2t7mc\" (UID: 
\"b1e46956-ff67-4f66-a222-545324fe670a\") " pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.525983 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-catalog-content\") pod \"certified-operators-2t7mc\" (UID: \"b1e46956-ff67-4f66-a222-545324fe670a\") " pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.526989 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-utilities\") pod \"certified-operators-2t7mc\" (UID: \"b1e46956-ff67-4f66-a222-545324fe670a\") " pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.557738 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zr697\" (UniqueName: \"kubernetes.io/projected/b1e46956-ff67-4f66-a222-545324fe670a-kube-api-access-zr697\") pod \"certified-operators-2t7mc\" (UID: \"b1e46956-ff67-4f66-a222-545324fe670a\") " pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:42 crc kubenswrapper[4861]: I0129 08:40:42.704394 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:43 crc kubenswrapper[4861]: I0129 08:40:43.223295 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2t7mc"] Jan 29 08:40:43 crc kubenswrapper[4861]: I0129 08:40:43.660161 4861 generic.go:334] "Generic (PLEG): container finished" podID="b1e46956-ff67-4f66-a222-545324fe670a" containerID="ddb38801310057bc8faa0ad91cfac1412fcca6eb7111521c89cc531a3e143a1d" exitCode=0 Jan 29 08:40:43 crc kubenswrapper[4861]: I0129 08:40:43.660235 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2t7mc" event={"ID":"b1e46956-ff67-4f66-a222-545324fe670a","Type":"ContainerDied","Data":"ddb38801310057bc8faa0ad91cfac1412fcca6eb7111521c89cc531a3e143a1d"} Jan 29 08:40:43 crc kubenswrapper[4861]: I0129 08:40:43.660568 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2t7mc" event={"ID":"b1e46956-ff67-4f66-a222-545324fe670a","Type":"ContainerStarted","Data":"3d2fff1c5cbfbe6cae4684e7071c5c88fde63cd527d3f8f58f4c276e3fe2b788"} Jan 29 08:40:44 crc kubenswrapper[4861]: I0129 08:40:44.672388 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2t7mc" event={"ID":"b1e46956-ff67-4f66-a222-545324fe670a","Type":"ContainerStarted","Data":"bda2b0a562d83523ff4e83d2a1b7c45f89921e84c3246eb39a3f19f6f25091ef"} Jan 29 08:40:45 crc kubenswrapper[4861]: I0129 08:40:45.683994 4861 generic.go:334] "Generic (PLEG): container finished" podID="b1e46956-ff67-4f66-a222-545324fe670a" containerID="bda2b0a562d83523ff4e83d2a1b7c45f89921e84c3246eb39a3f19f6f25091ef" exitCode=0 Jan 29 08:40:45 crc kubenswrapper[4861]: I0129 08:40:45.684133 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2t7mc" event={"ID":"b1e46956-ff67-4f66-a222-545324fe670a","Type":"ContainerDied","Data":"bda2b0a562d83523ff4e83d2a1b7c45f89921e84c3246eb39a3f19f6f25091ef"} Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.115097 4861 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-operators-p6tdj"] Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.125914 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.149720 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p6tdj"] Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.206374 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-catalog-content\") pod \"redhat-operators-p6tdj\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.206456 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-utilities\") pod \"redhat-operators-p6tdj\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.208150 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6rxr\" (UniqueName: \"kubernetes.io/projected/93ac62cc-73cb-4549-8563-9d16250f5ec8-kube-api-access-f6rxr\") pod \"redhat-operators-p6tdj\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.311610 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6rxr\" (UniqueName: \"kubernetes.io/projected/93ac62cc-73cb-4549-8563-9d16250f5ec8-kube-api-access-f6rxr\") pod \"redhat-operators-p6tdj\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.311751 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-catalog-content\") pod \"redhat-operators-p6tdj\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.311798 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-utilities\") pod \"redhat-operators-p6tdj\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.312881 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-utilities\") pod \"redhat-operators-p6tdj\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.313121 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-catalog-content\") pod \"redhat-operators-p6tdj\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " 
pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.340882 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6rxr\" (UniqueName: \"kubernetes.io/projected/93ac62cc-73cb-4549-8563-9d16250f5ec8-kube-api-access-f6rxr\") pod \"redhat-operators-p6tdj\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.455622 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.698011 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2t7mc" event={"ID":"b1e46956-ff67-4f66-a222-545324fe670a","Type":"ContainerStarted","Data":"740338a2c75e93285518143fa3a64624097f515d5eaccdb84ce5b9fc553437f0"} Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.720670 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2t7mc" podStartSLOduration=2.275876003 podStartE2EDuration="4.720649548s" podCreationTimestamp="2026-01-29 08:40:42 +0000 UTC" firstStartedPulling="2026-01-29 08:40:43.662337601 +0000 UTC m=+7535.333832158" lastFinishedPulling="2026-01-29 08:40:46.107111146 +0000 UTC m=+7537.778605703" observedRunningTime="2026-01-29 08:40:46.717036871 +0000 UTC m=+7538.388531438" watchObservedRunningTime="2026-01-29 08:40:46.720649548 +0000 UTC m=+7538.392144115" Jan 29 08:40:46 crc kubenswrapper[4861]: I0129 08:40:46.973659 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p6tdj"] Jan 29 08:40:46 crc kubenswrapper[4861]: W0129 08:40:46.974015 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod93ac62cc_73cb_4549_8563_9d16250f5ec8.slice/crio-9760ed1440fc03dfbd572b88a719f8a64c45a12b7a5db5f5aaf8d68ae7a7ec53 WatchSource:0}: Error finding container 9760ed1440fc03dfbd572b88a719f8a64c45a12b7a5db5f5aaf8d68ae7a7ec53: Status 404 returned error can't find the container with id 9760ed1440fc03dfbd572b88a719f8a64c45a12b7a5db5f5aaf8d68ae7a7ec53 Jan 29 08:40:47 crc kubenswrapper[4861]: I0129 08:40:47.707153 4861 generic.go:334] "Generic (PLEG): container finished" podID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerID="14c997a806f40d708d8949754e972dd82f946c675ece494b9c4ab00c8263bb4a" exitCode=0 Jan 29 08:40:47 crc kubenswrapper[4861]: I0129 08:40:47.707222 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p6tdj" event={"ID":"93ac62cc-73cb-4549-8563-9d16250f5ec8","Type":"ContainerDied","Data":"14c997a806f40d708d8949754e972dd82f946c675ece494b9c4ab00c8263bb4a"} Jan 29 08:40:47 crc kubenswrapper[4861]: I0129 08:40:47.707448 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p6tdj" event={"ID":"93ac62cc-73cb-4549-8563-9d16250f5ec8","Type":"ContainerStarted","Data":"9760ed1440fc03dfbd572b88a719f8a64c45a12b7a5db5f5aaf8d68ae7a7ec53"} Jan 29 08:40:48 crc kubenswrapper[4861]: I0129 08:40:48.719430 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p6tdj" event={"ID":"93ac62cc-73cb-4549-8563-9d16250f5ec8","Type":"ContainerStarted","Data":"d6c34cd94787b8b6ca9b0f55bc430793b4148d089f9372068fb9488936257f50"} Jan 29 08:40:50 crc 
kubenswrapper[4861]: I0129 08:40:50.748794 4861 generic.go:334] "Generic (PLEG): container finished" podID="6272a2bc-68af-4bb6-9e65-ba51d3183b87" containerID="b7acd4fecc548aaa9c8ef46fc04f78bffbf4a8ae719276ea4b3727fdb8777d3b" exitCode=0 Jan 29 08:40:50 crc kubenswrapper[4861]: I0129 08:40:50.749379 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-r2w5q" event={"ID":"6272a2bc-68af-4bb6-9e65-ba51d3183b87","Type":"ContainerDied","Data":"b7acd4fecc548aaa9c8ef46fc04f78bffbf4a8ae719276ea4b3727fdb8777d3b"} Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.449020 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.547309 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-inventory\") pod \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.547503 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-ssh-key-openstack-cell1\") pod \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.547619 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5d5cg\" (UniqueName: \"kubernetes.io/projected/6272a2bc-68af-4bb6-9e65-ba51d3183b87-kube-api-access-5d5cg\") pod \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\" (UID: \"6272a2bc-68af-4bb6-9e65-ba51d3183b87\") " Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.574030 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6272a2bc-68af-4bb6-9e65-ba51d3183b87-kube-api-access-5d5cg" (OuterVolumeSpecName: "kube-api-access-5d5cg") pod "6272a2bc-68af-4bb6-9e65-ba51d3183b87" (UID: "6272a2bc-68af-4bb6-9e65-ba51d3183b87"). InnerVolumeSpecName "kube-api-access-5d5cg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.601332 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "6272a2bc-68af-4bb6-9e65-ba51d3183b87" (UID: "6272a2bc-68af-4bb6-9e65-ba51d3183b87"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.608293 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-inventory" (OuterVolumeSpecName: "inventory") pod "6272a2bc-68af-4bb6-9e65-ba51d3183b87" (UID: "6272a2bc-68af-4bb6-9e65-ba51d3183b87"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.651720 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.651787 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6272a2bc-68af-4bb6-9e65-ba51d3183b87-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.651804 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5d5cg\" (UniqueName: \"kubernetes.io/projected/6272a2bc-68af-4bb6-9e65-ba51d3183b87-kube-api-access-5d5cg\") on node \"crc\" DevicePath \"\"" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.705045 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.705124 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.756817 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.785585 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-r2w5q" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.786341 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-r2w5q" event={"ID":"6272a2bc-68af-4bb6-9e65-ba51d3183b87","Type":"ContainerDied","Data":"9bdc6c3b457efe2cc89d5af2189dcb9bb6ee5a9e83236cb6147e047f0dad99e9"} Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.786383 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bdc6c3b457efe2cc89d5af2189dcb9bb6ee5a9e83236cb6147e047f0dad99e9" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.841507 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.878095 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-5kzn6"] Jan 29 08:40:52 crc kubenswrapper[4861]: E0129 08:40:52.878820 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6272a2bc-68af-4bb6-9e65-ba51d3183b87" containerName="install-os-openstack-openstack-cell1" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.878923 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6272a2bc-68af-4bb6-9e65-ba51d3183b87" containerName="install-os-openstack-openstack-cell1" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.879216 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="6272a2bc-68af-4bb6-9e65-ba51d3183b87" containerName="install-os-openstack-openstack-cell1" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.880143 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.886608 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.886654 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.887235 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.889286 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.920230 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-5kzn6"] Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.970665 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-inventory\") pod \"configure-os-openstack-openstack-cell1-5kzn6\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") " pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.970709 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-5kzn6\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") " pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:40:52 crc kubenswrapper[4861]: I0129 08:40:52.970852 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jphj\" (UniqueName: \"kubernetes.io/projected/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-kube-api-access-6jphj\") pod \"configure-os-openstack-openstack-cell1-5kzn6\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") " pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:40:53 crc kubenswrapper[4861]: I0129 08:40:53.000461 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2t7mc"] Jan 29 08:40:53 crc kubenswrapper[4861]: I0129 08:40:53.073302 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-inventory\") pod \"configure-os-openstack-openstack-cell1-5kzn6\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") " pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:40:53 crc kubenswrapper[4861]: I0129 08:40:53.073366 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-5kzn6\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") " pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:40:53 crc kubenswrapper[4861]: I0129 08:40:53.073443 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jphj\" (UniqueName: 
\"kubernetes.io/projected/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-kube-api-access-6jphj\") pod \"configure-os-openstack-openstack-cell1-5kzn6\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") " pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:40:53 crc kubenswrapper[4861]: I0129 08:40:53.078855 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-inventory\") pod \"configure-os-openstack-openstack-cell1-5kzn6\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") " pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:40:53 crc kubenswrapper[4861]: I0129 08:40:53.086035 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-5kzn6\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") " pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:40:53 crc kubenswrapper[4861]: I0129 08:40:53.091031 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jphj\" (UniqueName: \"kubernetes.io/projected/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-kube-api-access-6jphj\") pod \"configure-os-openstack-openstack-cell1-5kzn6\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") " pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:40:53 crc kubenswrapper[4861]: I0129 08:40:53.212056 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:40:53 crc kubenswrapper[4861]: I0129 08:40:53.835541 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-5kzn6"] Jan 29 08:40:53 crc kubenswrapper[4861]: W0129 08:40:53.838818 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9c6b766_edc9_4b0b_a2d2_a54171a0570a.slice/crio-adc88808087857e8bb6c02b02ed64e7e95f5990bcb738f51106be408dcbe0036 WatchSource:0}: Error finding container adc88808087857e8bb6c02b02ed64e7e95f5990bcb738f51106be408dcbe0036: Status 404 returned error can't find the container with id adc88808087857e8bb6c02b02ed64e7e95f5990bcb738f51106be408dcbe0036 Jan 29 08:40:54 crc kubenswrapper[4861]: I0129 08:40:54.803014 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" event={"ID":"b9c6b766-edc9-4b0b-a2d2-a54171a0570a","Type":"ContainerStarted","Data":"adc88808087857e8bb6c02b02ed64e7e95f5990bcb738f51106be408dcbe0036"} Jan 29 08:40:54 crc kubenswrapper[4861]: I0129 08:40:54.803230 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2t7mc" podUID="b1e46956-ff67-4f66-a222-545324fe670a" containerName="registry-server" containerID="cri-o://740338a2c75e93285518143fa3a64624097f515d5eaccdb84ce5b9fc553437f0" gracePeriod=2 Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.816693 4861 generic.go:334] "Generic (PLEG): container finished" podID="b1e46956-ff67-4f66-a222-545324fe670a" containerID="740338a2c75e93285518143fa3a64624097f515d5eaccdb84ce5b9fc553437f0" exitCode=0 Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.817156 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-2t7mc" event={"ID":"b1e46956-ff67-4f66-a222-545324fe670a","Type":"ContainerDied","Data":"740338a2c75e93285518143fa3a64624097f515d5eaccdb84ce5b9fc553437f0"} Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.817187 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2t7mc" event={"ID":"b1e46956-ff67-4f66-a222-545324fe670a","Type":"ContainerDied","Data":"3d2fff1c5cbfbe6cae4684e7071c5c88fde63cd527d3f8f58f4c276e3fe2b788"} Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.817199 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d2fff1c5cbfbe6cae4684e7071c5c88fde63cd527d3f8f58f4c276e3fe2b788" Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.818330 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" event={"ID":"b9c6b766-edc9-4b0b-a2d2-a54171a0570a","Type":"ContainerStarted","Data":"0624f04b13d25bc60724ed281acbb3a4398fb68557c7b9eba9b825421f79a774"} Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.824873 4861 generic.go:334] "Generic (PLEG): container finished" podID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerID="d6c34cd94787b8b6ca9b0f55bc430793b4148d089f9372068fb9488936257f50" exitCode=0 Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.824912 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p6tdj" event={"ID":"93ac62cc-73cb-4549-8563-9d16250f5ec8","Type":"ContainerDied","Data":"d6c34cd94787b8b6ca9b0f55bc430793b4148d089f9372068fb9488936257f50"} Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.842616 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" podStartSLOduration=3.033741651 podStartE2EDuration="3.842595458s" podCreationTimestamp="2026-01-29 08:40:52 +0000 UTC" firstStartedPulling="2026-01-29 08:40:53.841347111 +0000 UTC m=+7545.512841668" lastFinishedPulling="2026-01-29 08:40:54.650200918 +0000 UTC m=+7546.321695475" observedRunningTime="2026-01-29 08:40:55.838187851 +0000 UTC m=+7547.509682428" watchObservedRunningTime="2026-01-29 08:40:55.842595458 +0000 UTC m=+7547.514090015" Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.862208 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.944239 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-utilities\") pod \"b1e46956-ff67-4f66-a222-545324fe670a\" (UID: \"b1e46956-ff67-4f66-a222-545324fe670a\") " Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.944328 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zr697\" (UniqueName: \"kubernetes.io/projected/b1e46956-ff67-4f66-a222-545324fe670a-kube-api-access-zr697\") pod \"b1e46956-ff67-4f66-a222-545324fe670a\" (UID: \"b1e46956-ff67-4f66-a222-545324fe670a\") " Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.944474 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-catalog-content\") pod \"b1e46956-ff67-4f66-a222-545324fe670a\" (UID: \"b1e46956-ff67-4f66-a222-545324fe670a\") " Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.946315 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-utilities" (OuterVolumeSpecName: "utilities") pod "b1e46956-ff67-4f66-a222-545324fe670a" (UID: "b1e46956-ff67-4f66-a222-545324fe670a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.951540 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1e46956-ff67-4f66-a222-545324fe670a-kube-api-access-zr697" (OuterVolumeSpecName: "kube-api-access-zr697") pod "b1e46956-ff67-4f66-a222-545324fe670a" (UID: "b1e46956-ff67-4f66-a222-545324fe670a"). InnerVolumeSpecName "kube-api-access-zr697". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:40:55 crc kubenswrapper[4861]: I0129 08:40:55.989209 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b1e46956-ff67-4f66-a222-545324fe670a" (UID: "b1e46956-ff67-4f66-a222-545324fe670a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:40:56 crc kubenswrapper[4861]: I0129 08:40:56.048652 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:40:56 crc kubenswrapper[4861]: I0129 08:40:56.049297 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1e46956-ff67-4f66-a222-545324fe670a-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:40:56 crc kubenswrapper[4861]: I0129 08:40:56.049318 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zr697\" (UniqueName: \"kubernetes.io/projected/b1e46956-ff67-4f66-a222-545324fe670a-kube-api-access-zr697\") on node \"crc\" DevicePath \"\"" Jan 29 08:40:56 crc kubenswrapper[4861]: I0129 08:40:56.834865 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p6tdj" event={"ID":"93ac62cc-73cb-4549-8563-9d16250f5ec8","Type":"ContainerStarted","Data":"d3790053c6d7ff3d3b290a0477451cbe3b49326733196b06bb04f52b38691d6c"} Jan 29 08:40:56 crc kubenswrapper[4861]: I0129 08:40:56.835087 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2t7mc" Jan 29 08:40:56 crc kubenswrapper[4861]: I0129 08:40:56.864799 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p6tdj" podStartSLOduration=2.245094271 podStartE2EDuration="10.86477667s" podCreationTimestamp="2026-01-29 08:40:46 +0000 UTC" firstStartedPulling="2026-01-29 08:40:47.708656541 +0000 UTC m=+7539.380151098" lastFinishedPulling="2026-01-29 08:40:56.32833894 +0000 UTC m=+7547.999833497" observedRunningTime="2026-01-29 08:40:56.852872984 +0000 UTC m=+7548.524367561" watchObservedRunningTime="2026-01-29 08:40:56.86477667 +0000 UTC m=+7548.536271227" Jan 29 08:40:56 crc kubenswrapper[4861]: I0129 08:40:56.888356 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2t7mc"] Jan 29 08:40:56 crc kubenswrapper[4861]: I0129 08:40:56.898486 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2t7mc"] Jan 29 08:40:57 crc kubenswrapper[4861]: I0129 08:40:57.127960 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1e46956-ff67-4f66-a222-545324fe670a" path="/var/lib/kubelet/pods/b1e46956-ff67-4f66-a222-545324fe670a/volumes" Jan 29 08:41:00 crc kubenswrapper[4861]: I0129 08:41:00.629647 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:41:00 crc kubenswrapper[4861]: I0129 08:41:00.630253 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:41:06 crc kubenswrapper[4861]: I0129 08:41:06.456707 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p6tdj" Jan 
Jan 29 08:41:06 crc kubenswrapper[4861]: I0129 08:41:06.457321 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p6tdj"
Jan 29 08:41:07 crc kubenswrapper[4861]: I0129 08:41:07.500405 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-p6tdj" podUID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerName="registry-server" probeResult="failure" output=<
Jan 29 08:41:07 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s
Jan 29 08:41:07 crc kubenswrapper[4861]: >
Jan 29 08:41:17 crc kubenswrapper[4861]: I0129 08:41:17.502782 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-p6tdj" podUID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerName="registry-server" probeResult="failure" output=<
Jan 29 08:41:17 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s
Jan 29 08:41:17 crc kubenswrapper[4861]: >
Jan 29 08:41:26 crc kubenswrapper[4861]: I0129 08:41:26.506286 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p6tdj"
Jan 29 08:41:26 crc kubenswrapper[4861]: I0129 08:41:26.558179 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p6tdj"
Jan 29 08:41:28 crc kubenswrapper[4861]: I0129 08:41:28.661378 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p6tdj"]
Jan 29 08:41:28 crc kubenswrapper[4861]: I0129 08:41:28.662432 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p6tdj" podUID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerName="registry-server" containerID="cri-o://d3790053c6d7ff3d3b290a0477451cbe3b49326733196b06bb04f52b38691d6c" gracePeriod=2
Jan 29 08:41:29 crc kubenswrapper[4861]: I0129 08:41:29.167972 4861 generic.go:334] "Generic (PLEG): container finished" podID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerID="d3790053c6d7ff3d3b290a0477451cbe3b49326733196b06bb04f52b38691d6c" exitCode=0
Jan 29 08:41:29 crc kubenswrapper[4861]: I0129 08:41:29.168017 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p6tdj" event={"ID":"93ac62cc-73cb-4549-8563-9d16250f5ec8","Type":"ContainerDied","Data":"d3790053c6d7ff3d3b290a0477451cbe3b49326733196b06bb04f52b38691d6c"}
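[editor's note] The multi-line probe output above ("timeout: failed to connect service \":50051\" within 1s") is what a grpc-health-probe-style exec check prints when it cannot reach the registry-server's gRPC port before its deadline. A rough sketch of just the connect step under a 1s deadline; the real probe also issues a gRPC health-check RPC, which this omits:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	addr := ":50051" // empty host: the local system, as in the probe output above
	conn, err := net.DialTimeout("tcp", addr, 1*time.Second)
	if err != nil {
		// Mirrors the probe output captured in the log above.
		fmt.Printf("timeout: failed to connect service %q within 1s\n", addr)
		return
	}
	conn.Close()
	fmt.Println("service reachable")
}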
Need to start a new one" pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:41:29 crc kubenswrapper[4861]: I0129 08:41:29.902823 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-catalog-content\") pod \"93ac62cc-73cb-4549-8563-9d16250f5ec8\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " Jan 29 08:41:29 crc kubenswrapper[4861]: I0129 08:41:29.902983 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-utilities\") pod \"93ac62cc-73cb-4549-8563-9d16250f5ec8\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " Jan 29 08:41:29 crc kubenswrapper[4861]: I0129 08:41:29.903181 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6rxr\" (UniqueName: \"kubernetes.io/projected/93ac62cc-73cb-4549-8563-9d16250f5ec8-kube-api-access-f6rxr\") pod \"93ac62cc-73cb-4549-8563-9d16250f5ec8\" (UID: \"93ac62cc-73cb-4549-8563-9d16250f5ec8\") " Jan 29 08:41:29 crc kubenswrapper[4861]: I0129 08:41:29.903876 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-utilities" (OuterVolumeSpecName: "utilities") pod "93ac62cc-73cb-4549-8563-9d16250f5ec8" (UID: "93ac62cc-73cb-4549-8563-9d16250f5ec8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:41:29 crc kubenswrapper[4861]: I0129 08:41:29.908239 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93ac62cc-73cb-4549-8563-9d16250f5ec8-kube-api-access-f6rxr" (OuterVolumeSpecName: "kube-api-access-f6rxr") pod "93ac62cc-73cb-4549-8563-9d16250f5ec8" (UID: "93ac62cc-73cb-4549-8563-9d16250f5ec8"). InnerVolumeSpecName "kube-api-access-f6rxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.008039 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6rxr\" (UniqueName: \"kubernetes.io/projected/93ac62cc-73cb-4549-8563-9d16250f5ec8-kube-api-access-f6rxr\") on node \"crc\" DevicePath \"\"" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.008116 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.041869 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93ac62cc-73cb-4549-8563-9d16250f5ec8" (UID: "93ac62cc-73cb-4549-8563-9d16250f5ec8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.110764 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93ac62cc-73cb-4549-8563-9d16250f5ec8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.180010 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p6tdj" event={"ID":"93ac62cc-73cb-4549-8563-9d16250f5ec8","Type":"ContainerDied","Data":"9760ed1440fc03dfbd572b88a719f8a64c45a12b7a5db5f5aaf8d68ae7a7ec53"} Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.180053 4861 scope.go:117] "RemoveContainer" containerID="d3790053c6d7ff3d3b290a0477451cbe3b49326733196b06bb04f52b38691d6c" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.180064 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p6tdj" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.206424 4861 scope.go:117] "RemoveContainer" containerID="d6c34cd94787b8b6ca9b0f55bc430793b4148d089f9372068fb9488936257f50" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.217179 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p6tdj"] Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.226278 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p6tdj"] Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.243302 4861 scope.go:117] "RemoveContainer" containerID="14c997a806f40d708d8949754e972dd82f946c675ece494b9c4ab00c8263bb4a" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.629646 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.630209 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.630251 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.630932 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5a4be9b1e7d970af3d8f175e27e26eae76f837f496a6c2d96c5fa17cc090c87a"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 08:41:30 crc kubenswrapper[4861]: I0129 08:41:30.630988 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://5a4be9b1e7d970af3d8f175e27e26eae76f837f496a6c2d96c5fa17cc090c87a" gracePeriod=600 Jan 29 08:41:31 crc kubenswrapper[4861]: I0129 
Jan 29 08:41:31 crc kubenswrapper[4861]: I0129 08:41:31.129895 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93ac62cc-73cb-4549-8563-9d16250f5ec8" path="/var/lib/kubelet/pods/93ac62cc-73cb-4549-8563-9d16250f5ec8/volumes"
Jan 29 08:41:31 crc kubenswrapper[4861]: I0129 08:41:31.192597 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="5a4be9b1e7d970af3d8f175e27e26eae76f837f496a6c2d96c5fa17cc090c87a" exitCode=0
Jan 29 08:41:31 crc kubenswrapper[4861]: I0129 08:41:31.192649 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"5a4be9b1e7d970af3d8f175e27e26eae76f837f496a6c2d96c5fa17cc090c87a"}
Jan 29 08:41:31 crc kubenswrapper[4861]: I0129 08:41:31.192689 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"}
Jan 29 08:41:31 crc kubenswrapper[4861]: I0129 08:41:31.192707 4861 scope.go:117] "RemoveContainer" containerID="c3468d5bf636b7da72ee84618cf8402ab70afafce38ab90c8215db5d742f7751"
Jan 29 08:41:38 crc kubenswrapper[4861]: I0129 08:41:38.255795 4861 generic.go:334] "Generic (PLEG): container finished" podID="b9c6b766-edc9-4b0b-a2d2-a54171a0570a" containerID="0624f04b13d25bc60724ed281acbb3a4398fb68557c7b9eba9b825421f79a774" exitCode=0
Jan 29 08:41:38 crc kubenswrapper[4861]: I0129 08:41:38.256620 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" event={"ID":"b9c6b766-edc9-4b0b-a2d2-a54171a0570a","Type":"ContainerDied","Data":"0624f04b13d25bc60724ed281acbb3a4398fb68557c7b9eba9b825421f79a774"}
Jan 29 08:41:39 crc kubenswrapper[4861]: I0129 08:41:39.835429 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-5kzn6"
Jan 29 08:41:39 crc kubenswrapper[4861]: I0129 08:41:39.918737 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jphj\" (UniqueName: \"kubernetes.io/projected/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-kube-api-access-6jphj\") pod \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") "
Jan 29 08:41:39 crc kubenswrapper[4861]: I0129 08:41:39.918865 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-inventory\") pod \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") "
Jan 29 08:41:39 crc kubenswrapper[4861]: I0129 08:41:39.918924 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-ssh-key-openstack-cell1\") pod \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") "
Jan 29 08:41:39 crc kubenswrapper[4861]: I0129 08:41:39.927107 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-kube-api-access-6jphj" (OuterVolumeSpecName: "kube-api-access-6jphj") pod "b9c6b766-edc9-4b0b-a2d2-a54171a0570a" (UID: "b9c6b766-edc9-4b0b-a2d2-a54171a0570a"). InnerVolumeSpecName "kube-api-access-6jphj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:41:39 crc kubenswrapper[4861]: E0129 08:41:39.945814 4861 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-inventory podName:b9c6b766-edc9-4b0b-a2d2-a54171a0570a nodeName:}" failed. No retries permitted until 2026-01-29 08:41:40.445770754 +0000 UTC m=+7592.117265311 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "inventory" (UniqueName: "kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-inventory") pod "b9c6b766-edc9-4b0b-a2d2-a54171a0570a" (UID: "b9c6b766-edc9-4b0b-a2d2-a54171a0570a") : error deleting /var/lib/kubelet/pods/b9c6b766-edc9-4b0b-a2d2-a54171a0570a/volume-subpaths: remove /var/lib/kubelet/pods/b9c6b766-edc9-4b0b-a2d2-a54171a0570a/volume-subpaths: no such file or directory
Jan 29 08:41:39 crc kubenswrapper[4861]: I0129 08:41:39.948304 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "b9c6b766-edc9-4b0b-a2d2-a54171a0570a" (UID: "b9c6b766-edc9-4b0b-a2d2-a54171a0570a"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.022847 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jphj\" (UniqueName: \"kubernetes.io/projected/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-kube-api-access-6jphj\") on node \"crc\" DevicePath \"\""
Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.022898 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\""
Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.277595 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" event={"ID":"b9c6b766-edc9-4b0b-a2d2-a54171a0570a","Type":"ContainerDied","Data":"adc88808087857e8bb6c02b02ed64e7e95f5990bcb738f51106be408dcbe0036"}
Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.277650 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="adc88808087857e8bb6c02b02ed64e7e95f5990bcb738f51106be408dcbe0036"
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-5kzn6" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.365794 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-openstack-shvjr"] Jan 29 08:41:40 crc kubenswrapper[4861]: E0129 08:41:40.366719 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerName="extract-content" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.366807 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerName="extract-content" Jan 29 08:41:40 crc kubenswrapper[4861]: E0129 08:41:40.366872 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9c6b766-edc9-4b0b-a2d2-a54171a0570a" containerName="configure-os-openstack-openstack-cell1" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.366931 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9c6b766-edc9-4b0b-a2d2-a54171a0570a" containerName="configure-os-openstack-openstack-cell1" Jan 29 08:41:40 crc kubenswrapper[4861]: E0129 08:41:40.366988 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerName="registry-server" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.367043 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerName="registry-server" Jan 29 08:41:40 crc kubenswrapper[4861]: E0129 08:41:40.367723 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1e46956-ff67-4f66-a222-545324fe670a" containerName="extract-content" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.367801 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1e46956-ff67-4f66-a222-545324fe670a" containerName="extract-content" Jan 29 08:41:40 crc kubenswrapper[4861]: E0129 08:41:40.367855 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1e46956-ff67-4f66-a222-545324fe670a" containerName="registry-server" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.367914 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1e46956-ff67-4f66-a222-545324fe670a" containerName="registry-server" Jan 29 08:41:40 crc kubenswrapper[4861]: E0129 08:41:40.367999 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerName="extract-utilities" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.368055 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerName="extract-utilities" Jan 29 08:41:40 crc kubenswrapper[4861]: E0129 08:41:40.368155 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1e46956-ff67-4f66-a222-545324fe670a" containerName="extract-utilities" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.368211 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1e46956-ff67-4f66-a222-545324fe670a" containerName="extract-utilities" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.368745 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="93ac62cc-73cb-4549-8563-9d16250f5ec8" containerName="registry-server" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.368844 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1e46956-ff67-4f66-a222-545324fe670a" containerName="registry-server" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.368903 
4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9c6b766-edc9-4b0b-a2d2-a54171a0570a" containerName="configure-os-openstack-openstack-cell1" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.369754 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.378599 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-shvjr"] Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.432937 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-inventory-0\") pod \"ssh-known-hosts-openstack-shvjr\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.433005 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-shvjr\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.433489 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnp5v\" (UniqueName: \"kubernetes.io/projected/9a1adddb-0afb-4b8a-b08d-24a8045a6010-kube-api-access-gnp5v\") pod \"ssh-known-hosts-openstack-shvjr\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.534954 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-inventory\") pod \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\" (UID: \"b9c6b766-edc9-4b0b-a2d2-a54171a0570a\") " Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.535593 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnp5v\" (UniqueName: \"kubernetes.io/projected/9a1adddb-0afb-4b8a-b08d-24a8045a6010-kube-api-access-gnp5v\") pod \"ssh-known-hosts-openstack-shvjr\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.535717 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-inventory-0\") pod \"ssh-known-hosts-openstack-shvjr\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.535741 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-shvjr\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.542272 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-inventory" 
(OuterVolumeSpecName: "inventory") pod "b9c6b766-edc9-4b0b-a2d2-a54171a0570a" (UID: "b9c6b766-edc9-4b0b-a2d2-a54171a0570a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.545171 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-inventory-0\") pod \"ssh-known-hosts-openstack-shvjr\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.558884 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-shvjr\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.567746 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnp5v\" (UniqueName: \"kubernetes.io/projected/9a1adddb-0afb-4b8a-b08d-24a8045a6010-kube-api-access-gnp5v\") pod \"ssh-known-hosts-openstack-shvjr\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.637642 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9c6b766-edc9-4b0b-a2d2-a54171a0570a-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:41:40 crc kubenswrapper[4861]: I0129 08:41:40.686563 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:41 crc kubenswrapper[4861]: I0129 08:41:41.207564 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-shvjr"] Jan 29 08:41:41 crc kubenswrapper[4861]: I0129 08:41:41.291543 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-shvjr" event={"ID":"9a1adddb-0afb-4b8a-b08d-24a8045a6010","Type":"ContainerStarted","Data":"1a1ed5c7194cff444c6dceba107ea8dfa544b4f17fb445d7e362c7c2c0d01e9b"} Jan 29 08:41:42 crc kubenswrapper[4861]: I0129 08:41:42.305580 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-shvjr" event={"ID":"9a1adddb-0afb-4b8a-b08d-24a8045a6010","Type":"ContainerStarted","Data":"6e8f12cc1f516b2bac5abb5c29e2e892a49b147b6bc094073f5e36f54f40e4d4"} Jan 29 08:41:42 crc kubenswrapper[4861]: I0129 08:41:42.327185 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-openstack-shvjr" podStartSLOduration=1.875782727 podStartE2EDuration="2.327065281s" podCreationTimestamp="2026-01-29 08:41:40 +0000 UTC" firstStartedPulling="2026-01-29 08:41:41.213306253 +0000 UTC m=+7592.884800820" lastFinishedPulling="2026-01-29 08:41:41.664588817 +0000 UTC m=+7593.336083374" observedRunningTime="2026-01-29 08:41:42.319306414 +0000 UTC m=+7593.990800981" watchObservedRunningTime="2026-01-29 08:41:42.327065281 +0000 UTC m=+7593.998559838" Jan 29 08:41:50 crc kubenswrapper[4861]: I0129 08:41:50.385191 4861 generic.go:334] "Generic (PLEG): container finished" podID="9a1adddb-0afb-4b8a-b08d-24a8045a6010" containerID="6e8f12cc1f516b2bac5abb5c29e2e892a49b147b6bc094073f5e36f54f40e4d4" exitCode=0 Jan 29 08:41:50 crc 
kubenswrapper[4861]: I0129 08:41:50.385270 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-shvjr" event={"ID":"9a1adddb-0afb-4b8a-b08d-24a8045a6010","Type":"ContainerDied","Data":"6e8f12cc1f516b2bac5abb5c29e2e892a49b147b6bc094073f5e36f54f40e4d4"} Jan 29 08:41:51 crc kubenswrapper[4861]: I0129 08:41:51.969265 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:51 crc kubenswrapper[4861]: I0129 08:41:51.996545 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-inventory-0\") pod \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " Jan 29 08:41:51 crc kubenswrapper[4861]: I0129 08:41:51.996639 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnp5v\" (UniqueName: \"kubernetes.io/projected/9a1adddb-0afb-4b8a-b08d-24a8045a6010-kube-api-access-gnp5v\") pod \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " Jan 29 08:41:51 crc kubenswrapper[4861]: I0129 08:41:51.996994 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-ssh-key-openstack-cell1\") pod \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\" (UID: \"9a1adddb-0afb-4b8a-b08d-24a8045a6010\") " Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.003416 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a1adddb-0afb-4b8a-b08d-24a8045a6010-kube-api-access-gnp5v" (OuterVolumeSpecName: "kube-api-access-gnp5v") pod "9a1adddb-0afb-4b8a-b08d-24a8045a6010" (UID: "9a1adddb-0afb-4b8a-b08d-24a8045a6010"). InnerVolumeSpecName "kube-api-access-gnp5v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.033313 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "9a1adddb-0afb-4b8a-b08d-24a8045a6010" (UID: "9a1adddb-0afb-4b8a-b08d-24a8045a6010"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.052184 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "9a1adddb-0afb-4b8a-b08d-24a8045a6010" (UID: "9a1adddb-0afb-4b8a-b08d-24a8045a6010"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.101320 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.101359 4861 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/9a1adddb-0afb-4b8a-b08d-24a8045a6010-inventory-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.101371 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnp5v\" (UniqueName: \"kubernetes.io/projected/9a1adddb-0afb-4b8a-b08d-24a8045a6010-kube-api-access-gnp5v\") on node \"crc\" DevicePath \"\"" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.405214 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-shvjr" event={"ID":"9a1adddb-0afb-4b8a-b08d-24a8045a6010","Type":"ContainerDied","Data":"1a1ed5c7194cff444c6dceba107ea8dfa544b4f17fb445d7e362c7c2c0d01e9b"} Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.405287 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a1ed5c7194cff444c6dceba107ea8dfa544b4f17fb445d7e362c7c2c0d01e9b" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.405703 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-shvjr" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.497096 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-openstack-openstack-cell1-4d2vz"] Jan 29 08:41:52 crc kubenswrapper[4861]: E0129 08:41:52.499221 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a1adddb-0afb-4b8a-b08d-24a8045a6010" containerName="ssh-known-hosts-openstack" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.499250 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a1adddb-0afb-4b8a-b08d-24a8045a6010" containerName="ssh-known-hosts-openstack" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.500035 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a1adddb-0afb-4b8a-b08d-24a8045a6010" containerName="ssh-known-hosts-openstack" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.501552 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.509466 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.509895 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.510387 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.520251 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-inventory\") pod \"run-os-openstack-openstack-cell1-4d2vz\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.520328 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-ssh-key-openstack-cell1\") pod \"run-os-openstack-openstack-cell1-4d2vz\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.520363 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txhpd\" (UniqueName: \"kubernetes.io/projected/de505364-8d47-4ec7-ad63-9e28daabfb88-kube-api-access-txhpd\") pod \"run-os-openstack-openstack-cell1-4d2vz\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.528239 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.541943 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-4d2vz"] Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.623043 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-inventory\") pod \"run-os-openstack-openstack-cell1-4d2vz\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.623119 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-ssh-key-openstack-cell1\") pod \"run-os-openstack-openstack-cell1-4d2vz\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.623148 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txhpd\" (UniqueName: \"kubernetes.io/projected/de505364-8d47-4ec7-ad63-9e28daabfb88-kube-api-access-txhpd\") pod \"run-os-openstack-openstack-cell1-4d2vz\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 
08:41:52.629950 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-ssh-key-openstack-cell1\") pod \"run-os-openstack-openstack-cell1-4d2vz\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.629949 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-inventory\") pod \"run-os-openstack-openstack-cell1-4d2vz\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.646924 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txhpd\" (UniqueName: \"kubernetes.io/projected/de505364-8d47-4ec7-ad63-9e28daabfb88-kube-api-access-txhpd\") pod \"run-os-openstack-openstack-cell1-4d2vz\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:41:52 crc kubenswrapper[4861]: I0129 08:41:52.826669 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:41:53 crc kubenswrapper[4861]: I0129 08:41:53.356919 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-4d2vz"] Jan 29 08:41:53 crc kubenswrapper[4861]: I0129 08:41:53.417595 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-4d2vz" event={"ID":"de505364-8d47-4ec7-ad63-9e28daabfb88","Type":"ContainerStarted","Data":"e03d5fcca9a1403c1dbe11b9096ac6f13376c9f0e2d7f2dba563925f575c0cc7"} Jan 29 08:41:54 crc kubenswrapper[4861]: I0129 08:41:54.428567 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-4d2vz" event={"ID":"de505364-8d47-4ec7-ad63-9e28daabfb88","Type":"ContainerStarted","Data":"64a9ee86d2cbce3e8011fe36c9b9f7b34d739f300154612173114524bac6410a"} Jan 29 08:41:54 crc kubenswrapper[4861]: I0129 08:41:54.453724 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-openstack-openstack-cell1-4d2vz" podStartSLOduration=1.908379695 podStartE2EDuration="2.453708252s" podCreationTimestamp="2026-01-29 08:41:52 +0000 UTC" firstStartedPulling="2026-01-29 08:41:53.365256566 +0000 UTC m=+7605.036751123" lastFinishedPulling="2026-01-29 08:41:53.910585123 +0000 UTC m=+7605.582079680" observedRunningTime="2026-01-29 08:41:54.449374556 +0000 UTC m=+7606.120869133" watchObservedRunningTime="2026-01-29 08:41:54.453708252 +0000 UTC m=+7606.125202809" Jan 29 08:42:02 crc kubenswrapper[4861]: I0129 08:42:02.498699 4861 generic.go:334] "Generic (PLEG): container finished" podID="de505364-8d47-4ec7-ad63-9e28daabfb88" containerID="64a9ee86d2cbce3e8011fe36c9b9f7b34d739f300154612173114524bac6410a" exitCode=0 Jan 29 08:42:02 crc kubenswrapper[4861]: I0129 08:42:02.498768 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-4d2vz" event={"ID":"de505364-8d47-4ec7-ad63-9e28daabfb88","Type":"ContainerDied","Data":"64a9ee86d2cbce3e8011fe36c9b9f7b34d739f300154612173114524bac6410a"} Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.017247 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.136182 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txhpd\" (UniqueName: \"kubernetes.io/projected/de505364-8d47-4ec7-ad63-9e28daabfb88-kube-api-access-txhpd\") pod \"de505364-8d47-4ec7-ad63-9e28daabfb88\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.136276 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-ssh-key-openstack-cell1\") pod \"de505364-8d47-4ec7-ad63-9e28daabfb88\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.136359 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-inventory\") pod \"de505364-8d47-4ec7-ad63-9e28daabfb88\" (UID: \"de505364-8d47-4ec7-ad63-9e28daabfb88\") " Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.142258 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de505364-8d47-4ec7-ad63-9e28daabfb88-kube-api-access-txhpd" (OuterVolumeSpecName: "kube-api-access-txhpd") pod "de505364-8d47-4ec7-ad63-9e28daabfb88" (UID: "de505364-8d47-4ec7-ad63-9e28daabfb88"). InnerVolumeSpecName "kube-api-access-txhpd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.163873 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-inventory" (OuterVolumeSpecName: "inventory") pod "de505364-8d47-4ec7-ad63-9e28daabfb88" (UID: "de505364-8d47-4ec7-ad63-9e28daabfb88"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.170996 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "de505364-8d47-4ec7-ad63-9e28daabfb88" (UID: "de505364-8d47-4ec7-ad63-9e28daabfb88"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.240415 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txhpd\" (UniqueName: \"kubernetes.io/projected/de505364-8d47-4ec7-ad63-9e28daabfb88-kube-api-access-txhpd\") on node \"crc\" DevicePath \"\"" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.240545 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.240556 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de505364-8d47-4ec7-ad63-9e28daabfb88-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.542246 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-4d2vz" event={"ID":"de505364-8d47-4ec7-ad63-9e28daabfb88","Type":"ContainerDied","Data":"e03d5fcca9a1403c1dbe11b9096ac6f13376c9f0e2d7f2dba563925f575c0cc7"} Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.542289 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e03d5fcca9a1403c1dbe11b9096ac6f13376c9f0e2d7f2dba563925f575c0cc7" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.542328 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-4d2vz" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.607041 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-2xtw4"] Jan 29 08:42:04 crc kubenswrapper[4861]: E0129 08:42:04.607691 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de505364-8d47-4ec7-ad63-9e28daabfb88" containerName="run-os-openstack-openstack-cell1" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.607720 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="de505364-8d47-4ec7-ad63-9e28daabfb88" containerName="run-os-openstack-openstack-cell1" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.607952 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="de505364-8d47-4ec7-ad63-9e28daabfb88" containerName="run-os-openstack-openstack-cell1" Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.608935 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.611297 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.611507 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.613062 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.613350 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.615801 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-2xtw4"]
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.755285 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7w2tj\" (UniqueName: \"kubernetes.io/projected/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-kube-api-access-7w2tj\") pod \"reboot-os-openstack-openstack-cell1-2xtw4\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") " pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.755617 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-ssh-key-openstack-cell1\") pod \"reboot-os-openstack-openstack-cell1-2xtw4\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") " pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.755676 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-inventory\") pod \"reboot-os-openstack-openstack-cell1-2xtw4\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") " pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.857639 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-inventory\") pod \"reboot-os-openstack-openstack-cell1-2xtw4\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") " pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.857845 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7w2tj\" (UniqueName: \"kubernetes.io/projected/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-kube-api-access-7w2tj\") pod \"reboot-os-openstack-openstack-cell1-2xtw4\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") " pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.857882 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-ssh-key-openstack-cell1\") pod \"reboot-os-openstack-openstack-cell1-2xtw4\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") " pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.866194 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-inventory\") pod \"reboot-os-openstack-openstack-cell1-2xtw4\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") " pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.867706 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-ssh-key-openstack-cell1\") pod \"reboot-os-openstack-openstack-cell1-2xtw4\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") " pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.873244 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7w2tj\" (UniqueName: \"kubernetes.io/projected/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-kube-api-access-7w2tj\") pod \"reboot-os-openstack-openstack-cell1-2xtw4\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") " pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:04 crc kubenswrapper[4861]: I0129 08:42:04.959440 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:05 crc kubenswrapper[4861]: I0129 08:42:05.496049 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-2xtw4"]
Jan 29 08:42:05 crc kubenswrapper[4861]: I0129 08:42:05.552531 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4" event={"ID":"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8","Type":"ContainerStarted","Data":"7f6d7fd39d45a6f4ac9f979743670d942a00f035e18592bf3a00d680ff0c7333"}
Jan 29 08:42:06 crc kubenswrapper[4861]: I0129 08:42:06.564430 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4" event={"ID":"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8","Type":"ContainerStarted","Data":"792e053e5c8d0fd691ec26fab870909fc58172f6f7eaad24dbaf28715c1f6446"}
Jan 29 08:42:07 crc kubenswrapper[4861]: I0129 08:42:07.598913 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4" podStartSLOduration=2.815895677 podStartE2EDuration="3.598890297s" podCreationTimestamp="2026-01-29 08:42:04 +0000 UTC" firstStartedPulling="2026-01-29 08:42:05.501718258 +0000 UTC m=+7617.173212815" lastFinishedPulling="2026-01-29 08:42:06.284712878 +0000 UTC m=+7617.956207435" observedRunningTime="2026-01-29 08:42:07.589873977 +0000 UTC m=+7619.261368534" watchObservedRunningTime="2026-01-29 08:42:07.598890297 +0000 UTC m=+7619.270384844"
Jan 29 08:42:22 crc kubenswrapper[4861]: I0129 08:42:22.721453 4861 generic.go:334] "Generic (PLEG): container finished" podID="d81c0053-0659-4f0c-bb7e-e8a63e13a4d8" containerID="792e053e5c8d0fd691ec26fab870909fc58172f6f7eaad24dbaf28715c1f6446" exitCode=0
Jan 29 08:42:22 crc kubenswrapper[4861]: I0129 08:42:22.721523 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4" event={"ID":"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8","Type":"ContainerDied","Data":"792e053e5c8d0fd691ec26fab870909fc58172f6f7eaad24dbaf28715c1f6446"}
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.245627 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.348595 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-ssh-key-openstack-cell1\") pod \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") "
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.348732 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-inventory\") pod \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") "
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.348922 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7w2tj\" (UniqueName: \"kubernetes.io/projected/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-kube-api-access-7w2tj\") pod \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\" (UID: \"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8\") "
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.356014 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-kube-api-access-7w2tj" (OuterVolumeSpecName: "kube-api-access-7w2tj") pod "d81c0053-0659-4f0c-bb7e-e8a63e13a4d8" (UID: "d81c0053-0659-4f0c-bb7e-e8a63e13a4d8"). InnerVolumeSpecName "kube-api-access-7w2tj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.388393 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "d81c0053-0659-4f0c-bb7e-e8a63e13a4d8" (UID: "d81c0053-0659-4f0c-bb7e-e8a63e13a4d8"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.389764 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-inventory" (OuterVolumeSpecName: "inventory") pod "d81c0053-0659-4f0c-bb7e-e8a63e13a4d8" (UID: "d81c0053-0659-4f0c-bb7e-e8a63e13a4d8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.453315 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7w2tj\" (UniqueName: \"kubernetes.io/projected/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-kube-api-access-7w2tj\") on node \"crc\" DevicePath \"\""
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.453373 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\""
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.453389 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d81c0053-0659-4f0c-bb7e-e8a63e13a4d8-inventory\") on node \"crc\" DevicePath \"\""
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.741422 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4" event={"ID":"d81c0053-0659-4f0c-bb7e-e8a63e13a4d8","Type":"ContainerDied","Data":"7f6d7fd39d45a6f4ac9f979743670d942a00f035e18592bf3a00d680ff0c7333"}
Jan 29 08:42:24 crc kubenswrapper[4861]: I0129 08:42:24.741479 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f6d7fd39d45a6f4ac9f979743670d942a00f035e18592bf3a00d680ff0c7333"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.741990 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-2xtw4"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.862648 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-dbqfj"]
Jan 29 08:42:25 crc kubenswrapper[4861]: E0129 08:42:24.863155 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d81c0053-0659-4f0c-bb7e-e8a63e13a4d8" containerName="reboot-os-openstack-openstack-cell1"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.863170 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d81c0053-0659-4f0c-bb7e-e8a63e13a4d8" containerName="reboot-os-openstack-openstack-cell1"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.863418 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d81c0053-0659-4f0c-bb7e-e8a63e13a4d8" containerName="reboot-os-openstack-openstack-cell1"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.864441 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-dbqfj"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.867360 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.871715 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-dbqfj"]
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.882648 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.883103 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.883215 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-libvirt-default-certs-0"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.883335 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-telemetry-default-certs-0"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.883487 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.884137 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-neutron-metadata-default-certs-0"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:24.893464 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-ovn-default-certs-0"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.071515 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-telemetry-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.071620 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.071655 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj"
Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.071697 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj"
pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.071721 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.071747 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpgnd\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-kube-api-access-qpgnd\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.071768 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-ovn-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.071833 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.071867 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-neutron-metadata-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.071917 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.071968 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.072011 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ssh-key-openstack-cell1\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.072063 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.072106 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-inventory\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.072159 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-libvirt-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.175157 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-inventory\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.175268 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-libvirt-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176341 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-telemetry-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176453 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176494 4861 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176583 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176613 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176640 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-ovn-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176660 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpgnd\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-kube-api-access-qpgnd\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176775 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176810 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-neutron-metadata-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176874 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " 
pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176914 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.176963 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ssh-key-openstack-cell1\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.177023 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.180914 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-libvirt-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.184206 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-telemetry-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.184893 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-neutron-metadata-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.186033 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.186317 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ssh-key-openstack-cell1\") pod 
\"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.186537 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-ovn-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.186990 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-inventory\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.187730 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.188535 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.197778 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.197949 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.199568 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.203180 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpgnd\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-kube-api-access-qpgnd\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: 
\"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.207485 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.210810 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-dbqfj\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.216653 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:42:25 crc kubenswrapper[4861]: I0129 08:42:25.952454 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-dbqfj"] Jan 29 08:42:25 crc kubenswrapper[4861]: W0129 08:42:25.955809 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod394365fa_5d64_4f5e_b8f3_9d4abc0dbe07.slice/crio-53f31683cc3f49d9d7898afc73ce85976e71129f4ff973bffac5c9e5cab0ec0f WatchSource:0}: Error finding container 53f31683cc3f49d9d7898afc73ce85976e71129f4ff973bffac5c9e5cab0ec0f: Status 404 returned error can't find the container with id 53f31683cc3f49d9d7898afc73ce85976e71129f4ff973bffac5c9e5cab0ec0f Jan 29 08:42:26 crc kubenswrapper[4861]: I0129 08:42:26.767260 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" event={"ID":"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07","Type":"ContainerStarted","Data":"53f31683cc3f49d9d7898afc73ce85976e71129f4ff973bffac5c9e5cab0ec0f"} Jan 29 08:42:27 crc kubenswrapper[4861]: I0129 08:42:27.777522 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" event={"ID":"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07","Type":"ContainerStarted","Data":"0eac570ae24f6c6b5e066bfcbd34fb2201cda3cafe2dca03047a84429a10486f"} Jan 29 08:42:27 crc kubenswrapper[4861]: I0129 08:42:27.812484 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" podStartSLOduration=3.35960477 podStartE2EDuration="3.812462987s" podCreationTimestamp="2026-01-29 08:42:24 +0000 UTC" firstStartedPulling="2026-01-29 08:42:25.957936413 +0000 UTC m=+7637.629430970" lastFinishedPulling="2026-01-29 08:42:26.41079463 +0000 UTC m=+7638.082289187" observedRunningTime="2026-01-29 08:42:27.80354 +0000 UTC m=+7639.475034557" watchObservedRunningTime="2026-01-29 08:42:27.812462987 +0000 UTC m=+7639.483957544" Jan 29 08:43:02 crc kubenswrapper[4861]: I0129 08:43:02.074272 4861 generic.go:334] "Generic (PLEG): container finished" podID="394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" containerID="0eac570ae24f6c6b5e066bfcbd34fb2201cda3cafe2dca03047a84429a10486f" exitCode=0 Jan 29 08:43:02 crc kubenswrapper[4861]: I0129 08:43:02.074376 4861 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" event={"ID":"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07","Type":"ContainerDied","Data":"0eac570ae24f6c6b5e066bfcbd34fb2201cda3cafe2dca03047a84429a10486f"} Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.584750 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761294 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-libvirt-default-certs-0\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761475 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-metadata-combined-ca-bundle\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761499 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-sriov-combined-ca-bundle\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761532 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-neutron-metadata-default-certs-0\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761548 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-ovn-default-certs-0\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761614 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-dhcp-combined-ca-bundle\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761661 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpgnd\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-kube-api-access-qpgnd\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761686 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ssh-key-openstack-cell1\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: 
\"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761718 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-inventory\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761741 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-telemetry-combined-ca-bundle\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761782 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-nova-combined-ca-bundle\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761797 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-bootstrap-combined-ca-bundle\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761861 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ovn-combined-ca-bundle\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761891 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-libvirt-combined-ca-bundle\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.761917 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-telemetry-default-certs-0\") pod \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\" (UID: \"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07\") " Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.770125 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.770182 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-libvirt-default-certs-0") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). 
InnerVolumeSpecName "openstack-cell1-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.770171 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.770205 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.770235 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-kube-api-access-qpgnd" (OuterVolumeSpecName: "kube-api-access-qpgnd") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "kube-api-access-qpgnd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.770582 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.770775 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.771579 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-telemetry-default-certs-0") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "openstack-cell1-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.772527 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-neutron-metadata-default-certs-0") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "openstack-cell1-neutron-metadata-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.772563 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.773198 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.773484 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.787582 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-ovn-default-certs-0") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "openstack-cell1-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.796560 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.798262 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-inventory" (OuterVolumeSpecName: "inventory") pod "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" (UID: "394365fa-5d64-4f5e-b8f3-9d4abc0dbe07"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864534 4861 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864580 4861 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864590 4861 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864600 4861 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864620 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864630 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864639 4861 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864649 4861 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864663 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864672 4861 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-openstack-cell1-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864681 4861 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864690 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpgnd\" (UniqueName: 
\"kubernetes.io/projected/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-kube-api-access-qpgnd\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864700 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864708 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:03 crc kubenswrapper[4861]: I0129 08:43:03.864717 4861 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/394365fa-5d64-4f5e-b8f3-9d4abc0dbe07-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.105897 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" event={"ID":"394365fa-5d64-4f5e-b8f3-9d4abc0dbe07","Type":"ContainerDied","Data":"53f31683cc3f49d9d7898afc73ce85976e71129f4ff973bffac5c9e5cab0ec0f"} Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.105954 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53f31683cc3f49d9d7898afc73ce85976e71129f4ff973bffac5c9e5cab0ec0f" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.106636 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-dbqfj" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.205322 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-openstack-openstack-cell1-w2rml"] Jan 29 08:43:04 crc kubenswrapper[4861]: E0129 08:43:04.205886 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" containerName="install-certs-openstack-openstack-cell1" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.205910 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" containerName="install-certs-openstack-openstack-cell1" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.206251 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="394365fa-5d64-4f5e-b8f3-9d4abc0dbe07" containerName="install-certs-openstack-openstack-cell1" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.207340 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.211040 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.211091 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.211193 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.211257 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.211487 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.231954 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-w2rml"] Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.377490 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-inventory\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.377547 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.377660 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rzmm\" (UniqueName: \"kubernetes.io/projected/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-kube-api-access-7rzmm\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.377696 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.377748 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ssh-key-openstack-cell1\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.480110 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-inventory\") pod \"ovn-openstack-openstack-cell1-w2rml\" 
(UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.480396 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.480622 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rzmm\" (UniqueName: \"kubernetes.io/projected/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-kube-api-access-7rzmm\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.480806 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.480993 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ssh-key-openstack-cell1\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.482979 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.486924 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ssh-key-openstack-cell1\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.487180 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-inventory\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.498290 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.498758 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rzmm\" 
(UniqueName: \"kubernetes.io/projected/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-kube-api-access-7rzmm\") pod \"ovn-openstack-openstack-cell1-w2rml\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:04 crc kubenswrapper[4861]: I0129 08:43:04.547794 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:43:05 crc kubenswrapper[4861]: I0129 08:43:05.076488 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-w2rml"] Jan 29 08:43:05 crc kubenswrapper[4861]: I0129 08:43:05.128680 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-w2rml" event={"ID":"0ea9ddc2-bed7-4baa-9835-abfd8aefc637","Type":"ContainerStarted","Data":"53083d2692fed8eedf7919e0a9f0261b077a64ee9db9510701ac8790da09d60e"} Jan 29 08:43:06 crc kubenswrapper[4861]: I0129 08:43:06.133998 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-w2rml" event={"ID":"0ea9ddc2-bed7-4baa-9835-abfd8aefc637","Type":"ContainerStarted","Data":"e3e4d06f6fe8413fe1ad84c0b6cda02de489e9ab655f40ebe4ad5754132a4423"} Jan 29 08:43:30 crc kubenswrapper[4861]: I0129 08:43:30.630612 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:43:30 crc kubenswrapper[4861]: I0129 08:43:30.631284 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:44:00 crc kubenswrapper[4861]: I0129 08:44:00.630284 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:44:00 crc kubenswrapper[4861]: I0129 08:44:00.630859 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:44:07 crc kubenswrapper[4861]: E0129 08:44:07.812335 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ea9ddc2_bed7_4baa_9835_abfd8aefc637.slice/crio-e3e4d06f6fe8413fe1ad84c0b6cda02de489e9ab655f40ebe4ad5754132a4423.scope\": RecentStats: unable to find data in memory cache]" Jan 29 08:44:08 crc kubenswrapper[4861]: I0129 08:44:08.137665 4861 generic.go:334] "Generic (PLEG): container finished" podID="0ea9ddc2-bed7-4baa-9835-abfd8aefc637" containerID="e3e4d06f6fe8413fe1ad84c0b6cda02de489e9ab655f40ebe4ad5754132a4423" exitCode=0 Jan 29 08:44:08 crc kubenswrapper[4861]: I0129 08:44:08.137876 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-openstack-openstack-cell1-w2rml" event={"ID":"0ea9ddc2-bed7-4baa-9835-abfd8aefc637","Type":"ContainerDied","Data":"e3e4d06f6fe8413fe1ad84c0b6cda02de489e9ab655f40ebe4ad5754132a4423"} Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.610151 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-w2rml" Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.726523 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ssh-key-openstack-cell1\") pod \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.726586 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rzmm\" (UniqueName: \"kubernetes.io/projected/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-kube-api-access-7rzmm\") pod \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.726975 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovncontroller-config-0\") pod \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.727040 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-inventory\") pod \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.727245 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovn-combined-ca-bundle\") pod \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\" (UID: \"0ea9ddc2-bed7-4baa-9835-abfd8aefc637\") " Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.732505 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "0ea9ddc2-bed7-4baa-9835-abfd8aefc637" (UID: "0ea9ddc2-bed7-4baa-9835-abfd8aefc637"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.732896 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-kube-api-access-7rzmm" (OuterVolumeSpecName: "kube-api-access-7rzmm") pod "0ea9ddc2-bed7-4baa-9835-abfd8aefc637" (UID: "0ea9ddc2-bed7-4baa-9835-abfd8aefc637"). InnerVolumeSpecName "kube-api-access-7rzmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.754256 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "0ea9ddc2-bed7-4baa-9835-abfd8aefc637" (UID: "0ea9ddc2-bed7-4baa-9835-abfd8aefc637"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.756834 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-inventory" (OuterVolumeSpecName: "inventory") pod "0ea9ddc2-bed7-4baa-9835-abfd8aefc637" (UID: "0ea9ddc2-bed7-4baa-9835-abfd8aefc637"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.758123 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "0ea9ddc2-bed7-4baa-9835-abfd8aefc637" (UID: "0ea9ddc2-bed7-4baa-9835-abfd8aefc637"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.829718 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.829752 4861 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.829769 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.829781 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rzmm\" (UniqueName: \"kubernetes.io/projected/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-kube-api-access-7rzmm\") on node \"crc\" DevicePath \"\"" Jan 29 08:44:09 crc kubenswrapper[4861]: I0129 08:44:09.829792 4861 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0ea9ddc2-bed7-4baa-9835-abfd8aefc637-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.157977 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-w2rml" event={"ID":"0ea9ddc2-bed7-4baa-9835-abfd8aefc637","Type":"ContainerDied","Data":"53083d2692fed8eedf7919e0a9f0261b077a64ee9db9510701ac8790da09d60e"} Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.158308 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53083d2692fed8eedf7919e0a9f0261b077a64ee9db9510701ac8790da09d60e" Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.158040 4861 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.253332 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"]
Jan 29 08:44:10 crc kubenswrapper[4861]: E0129 08:44:10.253886 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ea9ddc2-bed7-4baa-9835-abfd8aefc637" containerName="ovn-openstack-openstack-cell1"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.253933 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ea9ddc2-bed7-4baa-9835-abfd8aefc637" containerName="ovn-openstack-openstack-cell1"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.254163 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ea9ddc2-bed7-4baa-9835-abfd8aefc637" containerName="ovn-openstack-openstack-cell1"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.255012 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.261566 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.261793 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.262132 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.262170 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.262225 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.262297 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.264094 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"]
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.342793 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-ssh-key-openstack-cell1\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.342840 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.342895 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9rpl\" (UniqueName: \"kubernetes.io/projected/64030eee-0e1c-4038-968b-421a3542fa93-kube-api-access-k9rpl\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.342986 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.343036 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.343125 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.444468 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-ssh-key-openstack-cell1\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.444515 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.444537 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9rpl\" (UniqueName: \"kubernetes.io/projected/64030eee-0e1c-4038-968b-421a3542fa93-kube-api-access-k9rpl\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.444606 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.444640 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.444697 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.449552 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.450012 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.450225 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-ssh-key-openstack-cell1\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.451974 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.459537 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.464567 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9rpl\" (UniqueName: \"kubernetes.io/projected/64030eee-0e1c-4038-968b-421a3542fa93-kube-api-access-k9rpl\") pod \"neutron-metadata-openstack-openstack-cell1-5jc4s\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:10 crc kubenswrapper[4861]: I0129 08:44:10.586338 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"
Jan 29 08:44:11 crc kubenswrapper[4861]: W0129 08:44:11.142915 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64030eee_0e1c_4038_968b_421a3542fa93.slice/crio-94d35abe85e5001051395a55ca4fad1b0413fb3ddde5aeba603d04fbc51840d0 WatchSource:0}: Error finding container 94d35abe85e5001051395a55ca4fad1b0413fb3ddde5aeba603d04fbc51840d0: Status 404 returned error can't find the container with id 94d35abe85e5001051395a55ca4fad1b0413fb3ddde5aeba603d04fbc51840d0
Jan 29 08:44:11 crc kubenswrapper[4861]: I0129 08:44:11.144242 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-5jc4s"]
Jan 29 08:44:11 crc kubenswrapper[4861]: I0129 08:44:11.170921 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s" event={"ID":"64030eee-0e1c-4038-968b-421a3542fa93","Type":"ContainerStarted","Data":"94d35abe85e5001051395a55ca4fad1b0413fb3ddde5aeba603d04fbc51840d0"}
Jan 29 08:44:12 crc kubenswrapper[4861]: I0129 08:44:12.182656 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s" event={"ID":"64030eee-0e1c-4038-968b-421a3542fa93","Type":"ContainerStarted","Data":"4079a10a1339b23e21ea9f0cb83f6c6574d78c20171e527ea14f560818bea161"}
Jan 29 08:44:12 crc kubenswrapper[4861]: I0129 08:44:12.203281 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s" podStartSLOduration=1.565002196 podStartE2EDuration="2.203258135s" podCreationTimestamp="2026-01-29 08:44:10 +0000 UTC" firstStartedPulling="2026-01-29 08:44:11.148680892 +0000 UTC m=+7742.820175439" lastFinishedPulling="2026-01-29 08:44:11.786936821 +0000 UTC m=+7743.458431378" observedRunningTime="2026-01-29 08:44:12.199445064 +0000 UTC m=+7743.870939641" watchObservedRunningTime="2026-01-29 08:44:12.203258135 +0000 UTC m=+7743.874752702"
restarted" Jan 29 08:44:30 crc kubenswrapper[4861]: I0129 08:44:30.630781 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" gracePeriod=600 Jan 29 08:44:30 crc kubenswrapper[4861]: E0129 08:44:30.766780 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:44:31 crc kubenswrapper[4861]: I0129 08:44:31.367121 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" exitCode=0 Jan 29 08:44:31 crc kubenswrapper[4861]: I0129 08:44:31.367430 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"} Jan 29 08:44:31 crc kubenswrapper[4861]: I0129 08:44:31.367461 4861 scope.go:117] "RemoveContainer" containerID="5a4be9b1e7d970af3d8f175e27e26eae76f837f496a6c2d96c5fa17cc090c87a" Jan 29 08:44:31 crc kubenswrapper[4861]: I0129 08:44:31.368100 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:44:31 crc kubenswrapper[4861]: E0129 08:44:31.368335 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:44:44 crc kubenswrapper[4861]: I0129 08:44:44.117486 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:44:44 crc kubenswrapper[4861]: E0129 08:44:44.118499 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:44:55 crc kubenswrapper[4861]: I0129 08:44:55.117138 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:44:55 crc kubenswrapper[4861]: E0129 08:44:55.117928 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.170261 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"]
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.172679 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.174857 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.177550 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.194037 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"]
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.237720 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/20c8c33b-41c3-4245-8595-e2462cbb7feb-config-volume\") pod \"collect-profiles-29494605-9ps75\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.237811 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/20c8c33b-41c3-4245-8595-e2462cbb7feb-secret-volume\") pod \"collect-profiles-29494605-9ps75\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.237899 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lw5pz\" (UniqueName: \"kubernetes.io/projected/20c8c33b-41c3-4245-8595-e2462cbb7feb-kube-api-access-lw5pz\") pod \"collect-profiles-29494605-9ps75\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.340421 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/20c8c33b-41c3-4245-8595-e2462cbb7feb-config-volume\") pod \"collect-profiles-29494605-9ps75\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.340883 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/20c8c33b-41c3-4245-8595-e2462cbb7feb-secret-volume\") pod \"collect-profiles-29494605-9ps75\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.340925 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lw5pz\" (UniqueName: \"kubernetes.io/projected/20c8c33b-41c3-4245-8595-e2462cbb7feb-kube-api-access-lw5pz\") pod \"collect-profiles-29494605-9ps75\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.341696 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/20c8c33b-41c3-4245-8595-e2462cbb7feb-config-volume\") pod \"collect-profiles-29494605-9ps75\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.348312 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/20c8c33b-41c3-4245-8595-e2462cbb7feb-secret-volume\") pod \"collect-profiles-29494605-9ps75\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.358973 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lw5pz\" (UniqueName: \"kubernetes.io/projected/20c8c33b-41c3-4245-8595-e2462cbb7feb-kube-api-access-lw5pz\") pod \"collect-profiles-29494605-9ps75\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.494924 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"
Jan 29 08:45:00 crc kubenswrapper[4861]: I0129 08:45:00.970181 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75"]
Jan 29 08:45:01 crc kubenswrapper[4861]: I0129 08:45:01.657560 4861 generic.go:334] "Generic (PLEG): container finished" podID="20c8c33b-41c3-4245-8595-e2462cbb7feb" containerID="8249acf1cb9d81b518c5bf49658ede700c47fb3c5a80600965d2a4d0939a7a55" exitCode=0
Jan 29 08:45:01 crc kubenswrapper[4861]: I0129 08:45:01.657672 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75" event={"ID":"20c8c33b-41c3-4245-8595-e2462cbb7feb","Type":"ContainerDied","Data":"8249acf1cb9d81b518c5bf49658ede700c47fb3c5a80600965d2a4d0939a7a55"}
Jan 29 08:45:01 crc kubenswrapper[4861]: I0129 08:45:01.657908 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75" event={"ID":"20c8c33b-41c3-4245-8595-e2462cbb7feb","Type":"ContainerStarted","Data":"00f29dd0a60c0e79de28ab9cc9a02f00d7523f51ac5c03b370f5c99fa5237f8a"}
Jan 29 08:45:02 crc kubenswrapper[4861]: I0129 08:45:02.668998 4861 generic.go:334] "Generic (PLEG): container finished" podID="64030eee-0e1c-4038-968b-421a3542fa93" containerID="4079a10a1339b23e21ea9f0cb83f6c6574d78c20171e527ea14f560818bea161" exitCode=0
Jan 29 08:45:02 crc kubenswrapper[4861]: I0129 08:45:02.669117 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s" event={"ID":"64030eee-0e1c-4038-968b-421a3542fa93","Type":"ContainerDied","Data":"4079a10a1339b23e21ea9f0cb83f6c6574d78c20171e527ea14f560818bea161"}
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75" Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.104918 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/20c8c33b-41c3-4245-8595-e2462cbb7feb-config-volume\") pod \"20c8c33b-41c3-4245-8595-e2462cbb7feb\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.105148 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/20c8c33b-41c3-4245-8595-e2462cbb7feb-secret-volume\") pod \"20c8c33b-41c3-4245-8595-e2462cbb7feb\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.105223 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lw5pz\" (UniqueName: \"kubernetes.io/projected/20c8c33b-41c3-4245-8595-e2462cbb7feb-kube-api-access-lw5pz\") pod \"20c8c33b-41c3-4245-8595-e2462cbb7feb\" (UID: \"20c8c33b-41c3-4245-8595-e2462cbb7feb\") " Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.105799 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20c8c33b-41c3-4245-8595-e2462cbb7feb-config-volume" (OuterVolumeSpecName: "config-volume") pod "20c8c33b-41c3-4245-8595-e2462cbb7feb" (UID: "20c8c33b-41c3-4245-8595-e2462cbb7feb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.115116 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20c8c33b-41c3-4245-8595-e2462cbb7feb-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "20c8c33b-41c3-4245-8595-e2462cbb7feb" (UID: "20c8c33b-41c3-4245-8595-e2462cbb7feb"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.121873 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20c8c33b-41c3-4245-8595-e2462cbb7feb-kube-api-access-lw5pz" (OuterVolumeSpecName: "kube-api-access-lw5pz") pod "20c8c33b-41c3-4245-8595-e2462cbb7feb" (UID: "20c8c33b-41c3-4245-8595-e2462cbb7feb"). InnerVolumeSpecName "kube-api-access-lw5pz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.208479 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/20c8c33b-41c3-4245-8595-e2462cbb7feb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.208512 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/20c8c33b-41c3-4245-8595-e2462cbb7feb-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.208523 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lw5pz\" (UniqueName: \"kubernetes.io/projected/20c8c33b-41c3-4245-8595-e2462cbb7feb-kube-api-access-lw5pz\") on node \"crc\" DevicePath \"\"" Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.680686 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75" event={"ID":"20c8c33b-41c3-4245-8595-e2462cbb7feb","Type":"ContainerDied","Data":"00f29dd0a60c0e79de28ab9cc9a02f00d7523f51ac5c03b370f5c99fa5237f8a"} Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.680736 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00f29dd0a60c0e79de28ab9cc9a02f00d7523f51ac5c03b370f5c99fa5237f8a" Jan 29 08:45:03 crc kubenswrapper[4861]: I0129 08:45:03.680804 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494605-9ps75" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.213014 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh"] Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.229339 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494560-6h8fh"] Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.238216 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.338137 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-inventory\") pod \"64030eee-0e1c-4038-968b-421a3542fa93\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.338389 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9rpl\" (UniqueName: \"kubernetes.io/projected/64030eee-0e1c-4038-968b-421a3542fa93-kube-api-access-k9rpl\") pod \"64030eee-0e1c-4038-968b-421a3542fa93\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.338544 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-nova-metadata-neutron-config-0\") pod \"64030eee-0e1c-4038-968b-421a3542fa93\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.338614 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-metadata-combined-ca-bundle\") pod \"64030eee-0e1c-4038-968b-421a3542fa93\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.338742 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-ovn-metadata-agent-neutron-config-0\") pod \"64030eee-0e1c-4038-968b-421a3542fa93\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.338957 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-ssh-key-openstack-cell1\") pod \"64030eee-0e1c-4038-968b-421a3542fa93\" (UID: \"64030eee-0e1c-4038-968b-421a3542fa93\") " Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.344082 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64030eee-0e1c-4038-968b-421a3542fa93-kube-api-access-k9rpl" (OuterVolumeSpecName: "kube-api-access-k9rpl") pod "64030eee-0e1c-4038-968b-421a3542fa93" (UID: "64030eee-0e1c-4038-968b-421a3542fa93"). InnerVolumeSpecName "kube-api-access-k9rpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.344221 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "64030eee-0e1c-4038-968b-421a3542fa93" (UID: "64030eee-0e1c-4038-968b-421a3542fa93"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.372753 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "64030eee-0e1c-4038-968b-421a3542fa93" (UID: "64030eee-0e1c-4038-968b-421a3542fa93"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.373293 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "64030eee-0e1c-4038-968b-421a3542fa93" (UID: "64030eee-0e1c-4038-968b-421a3542fa93"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.384126 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "64030eee-0e1c-4038-968b-421a3542fa93" (UID: "64030eee-0e1c-4038-968b-421a3542fa93"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.395732 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-inventory" (OuterVolumeSpecName: "inventory") pod "64030eee-0e1c-4038-968b-421a3542fa93" (UID: "64030eee-0e1c-4038-968b-421a3542fa93"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.442114 4861 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.442184 4861 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.442202 4861 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.442212 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.442223 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64030eee-0e1c-4038-968b-421a3542fa93-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.442234 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9rpl\" (UniqueName: \"kubernetes.io/projected/64030eee-0e1c-4038-968b-421a3542fa93-kube-api-access-k9rpl\") on node \"crc\" DevicePath \"\"" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.693577 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-5jc4s" event={"ID":"64030eee-0e1c-4038-968b-421a3542fa93","Type":"ContainerDied","Data":"94d35abe85e5001051395a55ca4fad1b0413fb3ddde5aeba603d04fbc51840d0"} Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.693631 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94d35abe85e5001051395a55ca4fad1b0413fb3ddde5aeba603d04fbc51840d0" Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.693637 4861 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.810698 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-x4qcm"]
Jan 29 08:45:04 crc kubenswrapper[4861]: E0129 08:45:04.811281 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64030eee-0e1c-4038-968b-421a3542fa93" containerName="neutron-metadata-openstack-openstack-cell1"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.811308 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="64030eee-0e1c-4038-968b-421a3542fa93" containerName="neutron-metadata-openstack-openstack-cell1"
Jan 29 08:45:04 crc kubenswrapper[4861]: E0129 08:45:04.811325 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20c8c33b-41c3-4245-8595-e2462cbb7feb" containerName="collect-profiles"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.811341 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="20c8c33b-41c3-4245-8595-e2462cbb7feb" containerName="collect-profiles"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.811638 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="64030eee-0e1c-4038-968b-421a3542fa93" containerName="neutron-metadata-openstack-openstack-cell1"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.811665 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="20c8c33b-41c3-4245-8595-e2462cbb7feb" containerName="collect-profiles"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.812542 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.815974 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.816000 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.816123 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.816254 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.816357 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.824885 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-x4qcm"]
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.855031 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.855102 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.855145 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-inventory\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.855189 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kmpm\" (UniqueName: \"kubernetes.io/projected/6e815d1a-6590-4dd3-95c2-f997fd213f09-kube-api-access-7kmpm\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.855281 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-ssh-key-openstack-cell1\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.957237 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.957311 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.957356 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-inventory\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.957403 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kmpm\" (UniqueName: \"kubernetes.io/projected/6e815d1a-6590-4dd3-95c2-f997fd213f09-kube-api-access-7kmpm\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.957481 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-ssh-key-openstack-cell1\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.961154 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.961230 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-inventory\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.961354 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.961484 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-ssh-key-openstack-cell1\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:04 crc kubenswrapper[4861]: I0129 08:45:04.981987 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kmpm\" (UniqueName: \"kubernetes.io/projected/6e815d1a-6590-4dd3-95c2-f997fd213f09-kube-api-access-7kmpm\") pod \"libvirt-openstack-openstack-cell1-x4qcm\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:05 crc kubenswrapper[4861]: I0129 08:45:05.133818 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a9e7e75-ce79-4d87-9819-043b04ded202" path="/var/lib/kubelet/pods/9a9e7e75-ce79-4d87-9819-043b04ded202/volumes"
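The "Cleaned up orphaned pod volumes dir" line above is the kubelet's housekeeping pass removing /var/lib/kubelet/pods/<UID>/volumes for a pod that no longer exists on the API server. A small read-only sketch of inspecting that on-disk layout on a node follows; the path structure is standard kubelet state, but running it requires root on the node itself.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// List per-pod volume directories under the kubelet state dir: the same
// paths the "Cleaned up orphaned pod volumes dir" housekeeping refers to.
func main() {
	const base = "/var/lib/kubelet/pods"
	pods, err := os.ReadDir(base)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, p := range pods {
		volDir := filepath.Join(base, p.Name(), "volumes")
		if entries, err := os.ReadDir(volDir); err == nil {
			// p.Name() is the pod UID; entries are per-plugin dirs such as
			// kubernetes.io~secret and kubernetes.io~projected.
			fmt.Printf("%s: %d volume plugin dirs\n", p.Name(), len(entries))
		}
	}
}
```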
Jan 29 08:45:05 crc kubenswrapper[4861]: I0129 08:45:05.138991 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-x4qcm"
Jan 29 08:45:05 crc kubenswrapper[4861]: I0129 08:45:05.657223 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-x4qcm"]
Jan 29 08:45:05 crc kubenswrapper[4861]: W0129 08:45:05.661520 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e815d1a_6590_4dd3_95c2_f997fd213f09.slice/crio-9f81027bc702f3e9e2a7bf81518689328cd908a3099341ec6a08260774311236 WatchSource:0}: Error finding container 9f81027bc702f3e9e2a7bf81518689328cd908a3099341ec6a08260774311236: Status 404 returned error can't find the container with id 9f81027bc702f3e9e2a7bf81518689328cd908a3099341ec6a08260774311236
Jan 29 08:45:05 crc kubenswrapper[4861]: I0129 08:45:05.663815 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 08:45:05 crc kubenswrapper[4861]: I0129 08:45:05.703784 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-x4qcm" event={"ID":"6e815d1a-6590-4dd3-95c2-f997fd213f09","Type":"ContainerStarted","Data":"9f81027bc702f3e9e2a7bf81518689328cd908a3099341ec6a08260774311236"}
Jan 29 08:45:07 crc kubenswrapper[4861]: I0129 08:45:07.723827 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-x4qcm" event={"ID":"6e815d1a-6590-4dd3-95c2-f997fd213f09","Type":"ContainerStarted","Data":"fca6f817ed7fa9de4988b793d527145618fd3ba6fe2aa16b8598663e2d395df4"}
Jan 29 08:45:07 crc kubenswrapper[4861]: I0129 08:45:07.749543 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-openstack-openstack-cell1-x4qcm" podStartSLOduration=2.172972909 podStartE2EDuration="3.749520699s" podCreationTimestamp="2026-01-29 08:45:04 +0000 UTC" firstStartedPulling="2026-01-29 08:45:05.663495766 +0000 UTC m=+7797.334990323" lastFinishedPulling="2026-01-29 08:45:07.240043556 +0000 UTC m=+7798.911538113" observedRunningTime="2026-01-29 08:45:07.741921727 +0000 UTC m=+7799.413416284" watchObservedRunningTime="2026-01-29 08:45:07.749520699 +0000 UTC m=+7799.421015256"
Jan 29 08:45:09 crc kubenswrapper[4861]: I0129 08:45:09.126207 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:45:09 crc kubenswrapper[4861]: E0129 08:45:09.126813 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:45:23 crc kubenswrapper[4861]: I0129 08:45:23.116925 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:45:23 crc kubenswrapper[4861]: E0129 08:45:23.118116 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:45:32 crc kubenswrapper[4861]: I0129 08:45:32.950849 4861 scope.go:117] "RemoveContainer" containerID="8f81ccaee78f79f9a9441d74c3a9700ea0caad086a65b57acbddaa3bb269b772"
Jan 29 08:45:34 crc kubenswrapper[4861]: I0129 08:45:34.117289 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:45:34 crc kubenswrapper[4861]: E0129 08:45:34.117856 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:45:45 crc kubenswrapper[4861]: I0129 08:45:45.117354 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:45:45 crc kubenswrapper[4861]: E0129 08:45:45.118411 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:45:58 crc kubenswrapper[4861]: I0129 08:45:58.116736 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:45:58 crc kubenswrapper[4861]: E0129 08:45:58.117523 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:46:11 crc kubenswrapper[4861]: I0129 08:46:11.118399 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:46:11 crc kubenswrapper[4861]: E0129 08:46:11.120014 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:46:23 crc kubenswrapper[4861]: I0129 08:46:23.117717 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:46:23 crc kubenswrapper[4861]: E0129 08:46:23.118968 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:46:38 crc kubenswrapper[4861]: I0129 08:46:38.117010 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:46:38 crc kubenswrapper[4861]: E0129 08:46:38.122491 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:46:49 crc kubenswrapper[4861]: I0129 08:46:49.124583 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:46:49 crc kubenswrapper[4861]: E0129 08:46:49.125564 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:47:02 crc kubenswrapper[4861]: I0129 08:47:02.117308 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:47:02 crc kubenswrapper[4861]: E0129 08:47:02.117928 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:47:13 crc kubenswrapper[4861]: I0129 08:47:13.116781 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:47:13 crc kubenswrapper[4861]: E0129 08:47:13.117617 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:47:25 crc kubenswrapper[4861]: I0129 08:47:25.118052 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af"
Jan 29 08:47:25 crc kubenswrapper[4861]: E0129 08:47:25.120536 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:47:33 crc kubenswrapper[4861]: I0129 08:47:33.073758 4861 scope.go:117] "RemoveContainer"
containerID="ddb38801310057bc8faa0ad91cfac1412fcca6eb7111521c89cc531a3e143a1d" Jan 29 08:47:33 crc kubenswrapper[4861]: I0129 08:47:33.095500 4861 scope.go:117] "RemoveContainer" containerID="bda2b0a562d83523ff4e83d2a1b7c45f89921e84c3246eb39a3f19f6f25091ef" Jan 29 08:47:33 crc kubenswrapper[4861]: I0129 08:47:33.159246 4861 scope.go:117] "RemoveContainer" containerID="740338a2c75e93285518143fa3a64624097f515d5eaccdb84ce5b9fc553437f0" Jan 29 08:47:36 crc kubenswrapper[4861]: I0129 08:47:36.116839 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:47:36 crc kubenswrapper[4861]: E0129 08:47:36.117726 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:47:50 crc kubenswrapper[4861]: I0129 08:47:50.116711 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:47:50 crc kubenswrapper[4861]: E0129 08:47:50.118837 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:48:05 crc kubenswrapper[4861]: I0129 08:48:05.117313 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:48:05 crc kubenswrapper[4861]: E0129 08:48:05.118583 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:48:19 crc kubenswrapper[4861]: I0129 08:48:19.124875 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:48:19 crc kubenswrapper[4861]: E0129 08:48:19.125825 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:48:31 crc kubenswrapper[4861]: I0129 08:48:31.117271 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:48:31 crc kubenswrapper[4861]: E0129 08:48:31.118108 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.071526 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l2nn5"] Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.074631 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.083360 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l2nn5"] Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.157918 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-utilities\") pod \"community-operators-l2nn5\" (UID: \"ff8cdaf3-b204-4327-bef1-53978da2f004\") " pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.158502 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-catalog-content\") pod \"community-operators-l2nn5\" (UID: \"ff8cdaf3-b204-4327-bef1-53978da2f004\") " pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.158717 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdbp9\" (UniqueName: \"kubernetes.io/projected/ff8cdaf3-b204-4327-bef1-53978da2f004-kube-api-access-zdbp9\") pod \"community-operators-l2nn5\" (UID: \"ff8cdaf3-b204-4327-bef1-53978da2f004\") " pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.261169 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-catalog-content\") pod \"community-operators-l2nn5\" (UID: \"ff8cdaf3-b204-4327-bef1-53978da2f004\") " pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.261287 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdbp9\" (UniqueName: \"kubernetes.io/projected/ff8cdaf3-b204-4327-bef1-53978da2f004-kube-api-access-zdbp9\") pod \"community-operators-l2nn5\" (UID: \"ff8cdaf3-b204-4327-bef1-53978da2f004\") " pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.261335 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-utilities\") pod \"community-operators-l2nn5\" (UID: \"ff8cdaf3-b204-4327-bef1-53978da2f004\") " pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.261828 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-catalog-content\") pod \"community-operators-l2nn5\" (UID: 
\"ff8cdaf3-b204-4327-bef1-53978da2f004\") " pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.261934 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-utilities\") pod \"community-operators-l2nn5\" (UID: \"ff8cdaf3-b204-4327-bef1-53978da2f004\") " pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.280779 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdbp9\" (UniqueName: \"kubernetes.io/projected/ff8cdaf3-b204-4327-bef1-53978da2f004-kube-api-access-zdbp9\") pod \"community-operators-l2nn5\" (UID: \"ff8cdaf3-b204-4327-bef1-53978da2f004\") " pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:39 crc kubenswrapper[4861]: I0129 08:48:39.409585 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:40 crc kubenswrapper[4861]: I0129 08:48:40.239488 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l2nn5"] Jan 29 08:48:40 crc kubenswrapper[4861]: I0129 08:48:40.852341 4861 generic.go:334] "Generic (PLEG): container finished" podID="ff8cdaf3-b204-4327-bef1-53978da2f004" containerID="6d0149bc338efb874a421ad8c25019963f28ac80cceba33706e30f1368128972" exitCode=0 Jan 29 08:48:40 crc kubenswrapper[4861]: I0129 08:48:40.852476 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2nn5" event={"ID":"ff8cdaf3-b204-4327-bef1-53978da2f004","Type":"ContainerDied","Data":"6d0149bc338efb874a421ad8c25019963f28ac80cceba33706e30f1368128972"} Jan 29 08:48:40 crc kubenswrapper[4861]: I0129 08:48:40.852731 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2nn5" event={"ID":"ff8cdaf3-b204-4327-bef1-53978da2f004","Type":"ContainerStarted","Data":"4dd044de0ddb630712b25d714ba451f534ae178b86dca7a33e99c2107dedf6ad"} Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.472462 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p45lc"] Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.474776 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.489271 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p45lc"] Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.619205 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9m8bj\" (UniqueName: \"kubernetes.io/projected/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-kube-api-access-9m8bj\") pod \"redhat-marketplace-p45lc\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.619264 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-utilities\") pod \"redhat-marketplace-p45lc\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.619533 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-catalog-content\") pod \"redhat-marketplace-p45lc\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.722062 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9m8bj\" (UniqueName: \"kubernetes.io/projected/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-kube-api-access-9m8bj\") pod \"redhat-marketplace-p45lc\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.722134 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-utilities\") pod \"redhat-marketplace-p45lc\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.722192 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-catalog-content\") pod \"redhat-marketplace-p45lc\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.723876 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-catalog-content\") pod \"redhat-marketplace-p45lc\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.723955 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-utilities\") pod \"redhat-marketplace-p45lc\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.744748 4861 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-9m8bj\" (UniqueName: \"kubernetes.io/projected/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-kube-api-access-9m8bj\") pod \"redhat-marketplace-p45lc\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.805405 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:41 crc kubenswrapper[4861]: I0129 08:48:41.884428 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2nn5" event={"ID":"ff8cdaf3-b204-4327-bef1-53978da2f004","Type":"ContainerStarted","Data":"705a3d15c9a29a09faa8acc850bf556adb0e297ff6a6b67ee80c812052caa179"} Jan 29 08:48:42 crc kubenswrapper[4861]: I0129 08:48:42.433896 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p45lc"] Jan 29 08:48:42 crc kubenswrapper[4861]: I0129 08:48:42.896880 4861 generic.go:334] "Generic (PLEG): container finished" podID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" containerID="65b075bff416b1ab949d2602b02cec314ff8b0dfe1eff8ea953840106a32e449" exitCode=0 Jan 29 08:48:42 crc kubenswrapper[4861]: I0129 08:48:42.897136 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p45lc" event={"ID":"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27","Type":"ContainerDied","Data":"65b075bff416b1ab949d2602b02cec314ff8b0dfe1eff8ea953840106a32e449"} Jan 29 08:48:42 crc kubenswrapper[4861]: I0129 08:48:42.897170 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p45lc" event={"ID":"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27","Type":"ContainerStarted","Data":"25ecddad4dd93ccfd517c8028368da3b5b2bb2827323f8de70579b2705daafb7"} Jan 29 08:48:42 crc kubenswrapper[4861]: I0129 08:48:42.905858 4861 generic.go:334] "Generic (PLEG): container finished" podID="ff8cdaf3-b204-4327-bef1-53978da2f004" containerID="705a3d15c9a29a09faa8acc850bf556adb0e297ff6a6b67ee80c812052caa179" exitCode=0 Jan 29 08:48:42 crc kubenswrapper[4861]: I0129 08:48:42.906007 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2nn5" event={"ID":"ff8cdaf3-b204-4327-bef1-53978da2f004","Type":"ContainerDied","Data":"705a3d15c9a29a09faa8acc850bf556adb0e297ff6a6b67ee80c812052caa179"} Jan 29 08:48:43 crc kubenswrapper[4861]: I0129 08:48:43.917795 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p45lc" event={"ID":"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27","Type":"ContainerStarted","Data":"7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af"} Jan 29 08:48:43 crc kubenswrapper[4861]: I0129 08:48:43.921327 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2nn5" event={"ID":"ff8cdaf3-b204-4327-bef1-53978da2f004","Type":"ContainerStarted","Data":"686e1fa760b182ce2c532fba8bd80211fa28e8bdc234c99b02a76420ef69aaed"} Jan 29 08:48:43 crc kubenswrapper[4861]: I0129 08:48:43.967387 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l2nn5" podStartSLOduration=2.530096221 podStartE2EDuration="4.967359737s" podCreationTimestamp="2026-01-29 08:48:39 +0000 UTC" firstStartedPulling="2026-01-29 08:48:40.854109048 +0000 UTC m=+8012.525603605" lastFinishedPulling="2026-01-29 08:48:43.291372564 +0000 UTC 
m=+8014.962867121" observedRunningTime="2026-01-29 08:48:43.964261865 +0000 UTC m=+8015.635756442" watchObservedRunningTime="2026-01-29 08:48:43.967359737 +0000 UTC m=+8015.638854294" Jan 29 08:48:44 crc kubenswrapper[4861]: I0129 08:48:44.931287 4861 generic.go:334] "Generic (PLEG): container finished" podID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" containerID="7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af" exitCode=0 Jan 29 08:48:44 crc kubenswrapper[4861]: I0129 08:48:44.931380 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p45lc" event={"ID":"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27","Type":"ContainerDied","Data":"7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af"} Jan 29 08:48:44 crc kubenswrapper[4861]: E0129 08:48:44.987480 4861 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7810eded_b29f_4a55_8c0d_f8b9f6fc9e27.slice/crio-7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7810eded_b29f_4a55_8c0d_f8b9f6fc9e27.slice/crio-conmon-7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af.scope\": RecentStats: unable to find data in memory cache]" Jan 29 08:48:45 crc kubenswrapper[4861]: I0129 08:48:45.116588 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:48:45 crc kubenswrapper[4861]: E0129 08:48:45.116974 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:48:45 crc kubenswrapper[4861]: I0129 08:48:45.941481 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p45lc" event={"ID":"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27","Type":"ContainerStarted","Data":"197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c"} Jan 29 08:48:45 crc kubenswrapper[4861]: I0129 08:48:45.966679 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p45lc" podStartSLOduration=2.485512439 podStartE2EDuration="4.966656402s" podCreationTimestamp="2026-01-29 08:48:41 +0000 UTC" firstStartedPulling="2026-01-29 08:48:42.898347589 +0000 UTC m=+8014.569842146" lastFinishedPulling="2026-01-29 08:48:45.379491552 +0000 UTC m=+8017.050986109" observedRunningTime="2026-01-29 08:48:45.959372148 +0000 UTC m=+8017.630866715" watchObservedRunningTime="2026-01-29 08:48:45.966656402 +0000 UTC m=+8017.638150959" Jan 29 08:48:49 crc kubenswrapper[4861]: I0129 08:48:49.410147 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:49 crc kubenswrapper[4861]: I0129 08:48:49.410763 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:49 crc kubenswrapper[4861]: I0129 08:48:49.464283 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:50 crc kubenswrapper[4861]: I0129 08:48:50.031882 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:50 crc kubenswrapper[4861]: I0129 08:48:50.258645 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l2nn5"] Jan 29 08:48:51 crc kubenswrapper[4861]: I0129 08:48:51.806028 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:51 crc kubenswrapper[4861]: I0129 08:48:51.806461 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:51 crc kubenswrapper[4861]: I0129 08:48:51.856695 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:52 crc kubenswrapper[4861]: I0129 08:48:52.006103 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l2nn5" podUID="ff8cdaf3-b204-4327-bef1-53978da2f004" containerName="registry-server" containerID="cri-o://686e1fa760b182ce2c532fba8bd80211fa28e8bdc234c99b02a76420ef69aaed" gracePeriod=2 Jan 29 08:48:52 crc kubenswrapper[4861]: I0129 08:48:52.054708 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:52 crc kubenswrapper[4861]: I0129 08:48:52.662048 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p45lc"] Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.027447 4861 generic.go:334] "Generic (PLEG): container finished" podID="ff8cdaf3-b204-4327-bef1-53978da2f004" containerID="686e1fa760b182ce2c532fba8bd80211fa28e8bdc234c99b02a76420ef69aaed" exitCode=0 Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.027556 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2nn5" event={"ID":"ff8cdaf3-b204-4327-bef1-53978da2f004","Type":"ContainerDied","Data":"686e1fa760b182ce2c532fba8bd80211fa28e8bdc234c99b02a76420ef69aaed"} Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.204650 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.306007 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-utilities\") pod \"ff8cdaf3-b204-4327-bef1-53978da2f004\" (UID: \"ff8cdaf3-b204-4327-bef1-53978da2f004\") " Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.306234 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-catalog-content\") pod \"ff8cdaf3-b204-4327-bef1-53978da2f004\" (UID: \"ff8cdaf3-b204-4327-bef1-53978da2f004\") " Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.306334 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdbp9\" (UniqueName: \"kubernetes.io/projected/ff8cdaf3-b204-4327-bef1-53978da2f004-kube-api-access-zdbp9\") pod \"ff8cdaf3-b204-4327-bef1-53978da2f004\" (UID: \"ff8cdaf3-b204-4327-bef1-53978da2f004\") " Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.309637 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-utilities" (OuterVolumeSpecName: "utilities") pod "ff8cdaf3-b204-4327-bef1-53978da2f004" (UID: "ff8cdaf3-b204-4327-bef1-53978da2f004"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.312027 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff8cdaf3-b204-4327-bef1-53978da2f004-kube-api-access-zdbp9" (OuterVolumeSpecName: "kube-api-access-zdbp9") pod "ff8cdaf3-b204-4327-bef1-53978da2f004" (UID: "ff8cdaf3-b204-4327-bef1-53978da2f004"). InnerVolumeSpecName "kube-api-access-zdbp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.364708 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ff8cdaf3-b204-4327-bef1-53978da2f004" (UID: "ff8cdaf3-b204-4327-bef1-53978da2f004"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.408545 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.408584 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff8cdaf3-b204-4327-bef1-53978da2f004-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:48:53 crc kubenswrapper[4861]: I0129 08:48:53.408597 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdbp9\" (UniqueName: \"kubernetes.io/projected/ff8cdaf3-b204-4327-bef1-53978da2f004-kube-api-access-zdbp9\") on node \"crc\" DevicePath \"\"" Jan 29 08:48:54 crc kubenswrapper[4861]: I0129 08:48:54.048942 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l2nn5" event={"ID":"ff8cdaf3-b204-4327-bef1-53978da2f004","Type":"ContainerDied","Data":"4dd044de0ddb630712b25d714ba451f534ae178b86dca7a33e99c2107dedf6ad"} Jan 29 08:48:54 crc kubenswrapper[4861]: I0129 08:48:54.049166 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p45lc" podUID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" containerName="registry-server" containerID="cri-o://197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c" gracePeriod=2 Jan 29 08:48:54 crc kubenswrapper[4861]: I0129 08:48:54.049299 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l2nn5" Jan 29 08:48:54 crc kubenswrapper[4861]: I0129 08:48:54.050342 4861 scope.go:117] "RemoveContainer" containerID="686e1fa760b182ce2c532fba8bd80211fa28e8bdc234c99b02a76420ef69aaed" Jan 29 08:48:54 crc kubenswrapper[4861]: I0129 08:48:54.095231 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l2nn5"] Jan 29 08:48:54 crc kubenswrapper[4861]: I0129 08:48:54.104112 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l2nn5"] Jan 29 08:48:54 crc kubenswrapper[4861]: I0129 08:48:54.119091 4861 scope.go:117] "RemoveContainer" containerID="705a3d15c9a29a09faa8acc850bf556adb0e297ff6a6b67ee80c812052caa179" Jan 29 08:48:54 crc kubenswrapper[4861]: I0129 08:48:54.142476 4861 scope.go:117] "RemoveContainer" containerID="6d0149bc338efb874a421ad8c25019963f28ac80cceba33706e30f1368128972" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.033928 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.074160 4861 generic.go:334] "Generic (PLEG): container finished" podID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" containerID="197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c" exitCode=0 Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.074217 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p45lc" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.074238 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p45lc" event={"ID":"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27","Type":"ContainerDied","Data":"197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c"} Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.074289 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p45lc" event={"ID":"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27","Type":"ContainerDied","Data":"25ecddad4dd93ccfd517c8028368da3b5b2bb2827323f8de70579b2705daafb7"} Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.074327 4861 scope.go:117] "RemoveContainer" containerID="197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.110113 4861 scope.go:117] "RemoveContainer" containerID="7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.128859 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff8cdaf3-b204-4327-bef1-53978da2f004" path="/var/lib/kubelet/pods/ff8cdaf3-b204-4327-bef1-53978da2f004/volumes" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.135452 4861 scope.go:117] "RemoveContainer" containerID="65b075bff416b1ab949d2602b02cec314ff8b0dfe1eff8ea953840106a32e449" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.161277 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9m8bj\" (UniqueName: \"kubernetes.io/projected/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-kube-api-access-9m8bj\") pod \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.161411 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-utilities\") pod \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.161449 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-catalog-content\") pod \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\" (UID: \"7810eded-b29f-4a55-8c0d-f8b9f6fc9e27\") " Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.166547 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-utilities" (OuterVolumeSpecName: "utilities") pod "7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" (UID: "7810eded-b29f-4a55-8c0d-f8b9f6fc9e27"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.168558 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-kube-api-access-9m8bj" (OuterVolumeSpecName: "kube-api-access-9m8bj") pod "7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" (UID: "7810eded-b29f-4a55-8c0d-f8b9f6fc9e27"). InnerVolumeSpecName "kube-api-access-9m8bj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.197202 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" (UID: "7810eded-b29f-4a55-8c0d-f8b9f6fc9e27"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.255017 4861 scope.go:117] "RemoveContainer" containerID="197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c" Jan 29 08:48:55 crc kubenswrapper[4861]: E0129 08:48:55.257241 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c\": container with ID starting with 197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c not found: ID does not exist" containerID="197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.257374 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c"} err="failed to get container status \"197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c\": rpc error: code = NotFound desc = could not find container \"197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c\": container with ID starting with 197b1c542b3626d17add6a18c5ea484c002be8d90f77d6b5db413cc969426b4c not found: ID does not exist" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.257406 4861 scope.go:117] "RemoveContainer" containerID="7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af" Jan 29 08:48:55 crc kubenswrapper[4861]: E0129 08:48:55.258289 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af\": container with ID starting with 7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af not found: ID does not exist" containerID="7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.258322 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af"} err="failed to get container status \"7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af\": rpc error: code = NotFound desc = could not find container \"7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af\": container with ID starting with 7cee4b3776e0a94bcd4c4f8c52520142e5ef2de8ddabbb5885660995bfcf64af not found: ID does not exist" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.258342 4861 scope.go:117] "RemoveContainer" containerID="65b075bff416b1ab949d2602b02cec314ff8b0dfe1eff8ea953840106a32e449" Jan 29 08:48:55 crc kubenswrapper[4861]: E0129 08:48:55.261177 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65b075bff416b1ab949d2602b02cec314ff8b0dfe1eff8ea953840106a32e449\": container with ID starting with 65b075bff416b1ab949d2602b02cec314ff8b0dfe1eff8ea953840106a32e449 not found: ID does not exist" 
containerID="65b075bff416b1ab949d2602b02cec314ff8b0dfe1eff8ea953840106a32e449" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.261237 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65b075bff416b1ab949d2602b02cec314ff8b0dfe1eff8ea953840106a32e449"} err="failed to get container status \"65b075bff416b1ab949d2602b02cec314ff8b0dfe1eff8ea953840106a32e449\": rpc error: code = NotFound desc = could not find container \"65b075bff416b1ab949d2602b02cec314ff8b0dfe1eff8ea953840106a32e449\": container with ID starting with 65b075bff416b1ab949d2602b02cec314ff8b0dfe1eff8ea953840106a32e449 not found: ID does not exist" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.267204 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9m8bj\" (UniqueName: \"kubernetes.io/projected/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-kube-api-access-9m8bj\") on node \"crc\" DevicePath \"\"" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.267468 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.267478 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.419033 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p45lc"] Jan 29 08:48:55 crc kubenswrapper[4861]: I0129 08:48:55.428392 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p45lc"] Jan 29 08:48:57 crc kubenswrapper[4861]: I0129 08:48:57.117923 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:48:57 crc kubenswrapper[4861]: E0129 08:48:57.118584 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:48:57 crc kubenswrapper[4861]: I0129 08:48:57.128555 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" path="/var/lib/kubelet/pods/7810eded-b29f-4a55-8c0d-f8b9f6fc9e27/volumes" Jan 29 08:49:12 crc kubenswrapper[4861]: I0129 08:49:12.117355 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:49:12 crc kubenswrapper[4861]: E0129 08:49:12.118163 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:49:26 crc kubenswrapper[4861]: I0129 08:49:26.117839 4861 scope.go:117] "RemoveContainer" 
containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:49:26 crc kubenswrapper[4861]: E0129 08:49:26.118630 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:49:32 crc kubenswrapper[4861]: I0129 08:49:32.422147 4861 generic.go:334] "Generic (PLEG): container finished" podID="6e815d1a-6590-4dd3-95c2-f997fd213f09" containerID="fca6f817ed7fa9de4988b793d527145618fd3ba6fe2aa16b8598663e2d395df4" exitCode=0 Jan 29 08:49:32 crc kubenswrapper[4861]: I0129 08:49:32.422227 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-x4qcm" event={"ID":"6e815d1a-6590-4dd3-95c2-f997fd213f09","Type":"ContainerDied","Data":"fca6f817ed7fa9de4988b793d527145618fd3ba6fe2aa16b8598663e2d395df4"} Jan 29 08:49:33 crc kubenswrapper[4861]: I0129 08:49:33.883138 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-x4qcm" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.052136 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-inventory\") pod \"6e815d1a-6590-4dd3-95c2-f997fd213f09\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.052466 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-ssh-key-openstack-cell1\") pod \"6e815d1a-6590-4dd3-95c2-f997fd213f09\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.052510 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7kmpm\" (UniqueName: \"kubernetes.io/projected/6e815d1a-6590-4dd3-95c2-f997fd213f09-kube-api-access-7kmpm\") pod \"6e815d1a-6590-4dd3-95c2-f997fd213f09\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.052587 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-combined-ca-bundle\") pod \"6e815d1a-6590-4dd3-95c2-f997fd213f09\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.052692 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-secret-0\") pod \"6e815d1a-6590-4dd3-95c2-f997fd213f09\" (UID: \"6e815d1a-6590-4dd3-95c2-f997fd213f09\") " Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.057610 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "6e815d1a-6590-4dd3-95c2-f997fd213f09" (UID: "6e815d1a-6590-4dd3-95c2-f997fd213f09"). 
InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.063952 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e815d1a-6590-4dd3-95c2-f997fd213f09-kube-api-access-7kmpm" (OuterVolumeSpecName: "kube-api-access-7kmpm") pod "6e815d1a-6590-4dd3-95c2-f997fd213f09" (UID: "6e815d1a-6590-4dd3-95c2-f997fd213f09"). InnerVolumeSpecName "kube-api-access-7kmpm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.081907 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "6e815d1a-6590-4dd3-95c2-f997fd213f09" (UID: "6e815d1a-6590-4dd3-95c2-f997fd213f09"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.092216 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "6e815d1a-6590-4dd3-95c2-f997fd213f09" (UID: "6e815d1a-6590-4dd3-95c2-f997fd213f09"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.104788 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-inventory" (OuterVolumeSpecName: "inventory") pod "6e815d1a-6590-4dd3-95c2-f997fd213f09" (UID: "6e815d1a-6590-4dd3-95c2-f997fd213f09"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.160572 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.160619 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.160634 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7kmpm\" (UniqueName: \"kubernetes.io/projected/6e815d1a-6590-4dd3-95c2-f997fd213f09-kube-api-access-7kmpm\") on node \"crc\" DevicePath \"\"" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.160648 4861 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.160661 4861 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/6e815d1a-6590-4dd3-95c2-f997fd213f09-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.444519 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-x4qcm" event={"ID":"6e815d1a-6590-4dd3-95c2-f997fd213f09","Type":"ContainerDied","Data":"9f81027bc702f3e9e2a7bf81518689328cd908a3099341ec6a08260774311236"} Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.444570 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f81027bc702f3e9e2a7bf81518689328cd908a3099341ec6a08260774311236" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.444588 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-x4qcm" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.546465 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-vkvwl"] Jan 29 08:49:34 crc kubenswrapper[4861]: E0129 08:49:34.547306 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" containerName="registry-server" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.547328 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" containerName="registry-server" Jan 29 08:49:34 crc kubenswrapper[4861]: E0129 08:49:34.547341 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff8cdaf3-b204-4327-bef1-53978da2f004" containerName="extract-utilities" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.547350 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff8cdaf3-b204-4327-bef1-53978da2f004" containerName="extract-utilities" Jan 29 08:49:34 crc kubenswrapper[4861]: E0129 08:49:34.547372 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff8cdaf3-b204-4327-bef1-53978da2f004" containerName="extract-content" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.547382 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff8cdaf3-b204-4327-bef1-53978da2f004" containerName="extract-content" Jan 29 08:49:34 crc kubenswrapper[4861]: E0129 08:49:34.547402 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" containerName="extract-utilities" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.547409 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" containerName="extract-utilities" Jan 29 08:49:34 crc kubenswrapper[4861]: E0129 08:49:34.547429 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff8cdaf3-b204-4327-bef1-53978da2f004" containerName="registry-server" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.547436 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff8cdaf3-b204-4327-bef1-53978da2f004" containerName="registry-server" Jan 29 08:49:34 crc kubenswrapper[4861]: E0129 08:49:34.547450 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e815d1a-6590-4dd3-95c2-f997fd213f09" containerName="libvirt-openstack-openstack-cell1" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.547458 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e815d1a-6590-4dd3-95c2-f997fd213f09" containerName="libvirt-openstack-openstack-cell1" Jan 29 08:49:34 crc kubenswrapper[4861]: E0129 08:49:34.547476 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" containerName="extract-content" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.547483 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" containerName="extract-content" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.547728 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="7810eded-b29f-4a55-8c0d-f8b9f6fc9e27" containerName="registry-server" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.547757 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff8cdaf3-b204-4327-bef1-53978da2f004" containerName="registry-server" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.547773 4861 
memory_manager.go:354] "RemoveStaleState removing state" podUID="6e815d1a-6590-4dd3-95c2-f997fd213f09" containerName="libvirt-openstack-openstack-cell1" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.548691 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.554015 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.554598 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.554647 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.554927 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.555108 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.555446 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.555473 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.567902 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-inventory\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.568031 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.568120 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nq89\" (UniqueName: \"kubernetes.io/projected/ef737a6a-1c77-460c-9152-952334d3ede1-kube-api-access-7nq89\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.568221 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.568252 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: 
\"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.568285 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.568392 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.568432 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.568458 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-vkvwl"] Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.568507 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.670643 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-inventory\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.670763 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.670799 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nq89\" (UniqueName: \"kubernetes.io/projected/ef737a6a-1c77-460c-9152-952334d3ede1-kube-api-access-7nq89\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc 
kubenswrapper[4861]: I0129 08:49:34.670841 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.670858 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.670889 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.670958 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.670988 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.671046 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.672739 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.674486 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.674540 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.675020 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.675488 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.675715 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.675846 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.688897 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-inventory\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.689600 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nq89\" (UniqueName: \"kubernetes.io/projected/ef737a6a-1c77-460c-9152-952334d3ede1-kube-api-access-7nq89\") pod \"nova-cell1-openstack-openstack-cell1-vkvwl\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:34 crc kubenswrapper[4861]: I0129 08:49:34.869004 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:49:35 crc kubenswrapper[4861]: I0129 08:49:35.427446 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-vkvwl"] Jan 29 08:49:35 crc kubenswrapper[4861]: I0129 08:49:35.460217 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" event={"ID":"ef737a6a-1c77-460c-9152-952334d3ede1","Type":"ContainerStarted","Data":"468bf94eda8e30ac2f5983f61b38a11a578170e7bf32112eb60c16c22258d892"} Jan 29 08:49:36 crc kubenswrapper[4861]: I0129 08:49:36.469343 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" event={"ID":"ef737a6a-1c77-460c-9152-952334d3ede1","Type":"ContainerStarted","Data":"4c8e14eacbc8472a129e795dfe44907dc98d5aeaf1cf19248184dade577302d0"} Jan 29 08:49:36 crc kubenswrapper[4861]: I0129 08:49:36.488326 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" podStartSLOduration=1.964704911 podStartE2EDuration="2.48830273s" podCreationTimestamp="2026-01-29 08:49:34 +0000 UTC" firstStartedPulling="2026-01-29 08:49:35.44025566 +0000 UTC m=+8067.111750217" lastFinishedPulling="2026-01-29 08:49:35.963853479 +0000 UTC m=+8067.635348036" observedRunningTime="2026-01-29 08:49:36.484357515 +0000 UTC m=+8068.155852082" watchObservedRunningTime="2026-01-29 08:49:36.48830273 +0000 UTC m=+8068.159797287" Jan 29 08:49:39 crc kubenswrapper[4861]: I0129 08:49:39.123396 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:49:39 crc kubenswrapper[4861]: I0129 08:49:39.499871 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"87606b0fe7e00e9d70953c778824d4c6bef40fc9cbeb821b1f5c869e6ae6b8a4"} Jan 29 08:51:03 crc kubenswrapper[4861]: I0129 08:51:03.760537 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j484s"] Jan 29 08:51:03 crc kubenswrapper[4861]: I0129 08:51:03.763031 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:03 crc kubenswrapper[4861]: I0129 08:51:03.784595 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j484s"] Jan 29 08:51:03 crc kubenswrapper[4861]: I0129 08:51:03.892049 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-utilities\") pod \"certified-operators-j484s\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:03 crc kubenswrapper[4861]: I0129 08:51:03.892365 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26sf5\" (UniqueName: \"kubernetes.io/projected/aa95fe3f-f16d-4700-95a5-755b84619eff-kube-api-access-26sf5\") pod \"certified-operators-j484s\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:03 crc kubenswrapper[4861]: I0129 08:51:03.892551 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-catalog-content\") pod \"certified-operators-j484s\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:03 crc kubenswrapper[4861]: I0129 08:51:03.994368 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-catalog-content\") pod \"certified-operators-j484s\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:03 crc kubenswrapper[4861]: I0129 08:51:03.994542 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-utilities\") pod \"certified-operators-j484s\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:03 crc kubenswrapper[4861]: I0129 08:51:03.994638 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26sf5\" (UniqueName: \"kubernetes.io/projected/aa95fe3f-f16d-4700-95a5-755b84619eff-kube-api-access-26sf5\") pod \"certified-operators-j484s\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:03 crc kubenswrapper[4861]: I0129 08:51:03.994910 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-catalog-content\") pod \"certified-operators-j484s\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:03 crc kubenswrapper[4861]: I0129 08:51:03.995257 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-utilities\") pod \"certified-operators-j484s\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:04 crc kubenswrapper[4861]: I0129 08:51:04.022263 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-26sf5\" (UniqueName: \"kubernetes.io/projected/aa95fe3f-f16d-4700-95a5-755b84619eff-kube-api-access-26sf5\") pod \"certified-operators-j484s\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:04 crc kubenswrapper[4861]: I0129 08:51:04.097786 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:04 crc kubenswrapper[4861]: I0129 08:51:04.666449 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j484s"] Jan 29 08:51:05 crc kubenswrapper[4861]: I0129 08:51:05.312527 4861 generic.go:334] "Generic (PLEG): container finished" podID="aa95fe3f-f16d-4700-95a5-755b84619eff" containerID="d5167776553ab91f65aca548c7ad4ad0d8263ece7146bb5bdc3d4f5212d2b6fd" exitCode=0 Jan 29 08:51:05 crc kubenswrapper[4861]: I0129 08:51:05.312628 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j484s" event={"ID":"aa95fe3f-f16d-4700-95a5-755b84619eff","Type":"ContainerDied","Data":"d5167776553ab91f65aca548c7ad4ad0d8263ece7146bb5bdc3d4f5212d2b6fd"} Jan 29 08:51:05 crc kubenswrapper[4861]: I0129 08:51:05.313010 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j484s" event={"ID":"aa95fe3f-f16d-4700-95a5-755b84619eff","Type":"ContainerStarted","Data":"6149ce22be6948aa2a634f74dc461d4fa83f4f8fef898151eec3332fa632a67d"} Jan 29 08:51:05 crc kubenswrapper[4861]: I0129 08:51:05.315771 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 08:51:07 crc kubenswrapper[4861]: I0129 08:51:07.341992 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j484s" event={"ID":"aa95fe3f-f16d-4700-95a5-755b84619eff","Type":"ContainerStarted","Data":"c9206cc091274ae71c0296c6cae33390d60f59e6d6cde31043ca28a4cbd4d614"} Jan 29 08:51:08 crc kubenswrapper[4861]: I0129 08:51:08.350332 4861 generic.go:334] "Generic (PLEG): container finished" podID="aa95fe3f-f16d-4700-95a5-755b84619eff" containerID="c9206cc091274ae71c0296c6cae33390d60f59e6d6cde31043ca28a4cbd4d614" exitCode=0 Jan 29 08:51:08 crc kubenswrapper[4861]: I0129 08:51:08.350394 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j484s" event={"ID":"aa95fe3f-f16d-4700-95a5-755b84619eff","Type":"ContainerDied","Data":"c9206cc091274ae71c0296c6cae33390d60f59e6d6cde31043ca28a4cbd4d614"} Jan 29 08:51:09 crc kubenswrapper[4861]: I0129 08:51:09.361419 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j484s" event={"ID":"aa95fe3f-f16d-4700-95a5-755b84619eff","Type":"ContainerStarted","Data":"4b5a76c04c0335fdbcf8c83f5f250cf0e2b6aa00375950e70fdbafb8dd5abd17"} Jan 29 08:51:09 crc kubenswrapper[4861]: I0129 08:51:09.389550 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j484s" podStartSLOduration=2.827909168 podStartE2EDuration="6.389529884s" podCreationTimestamp="2026-01-29 08:51:03 +0000 UTC" firstStartedPulling="2026-01-29 08:51:05.315469616 +0000 UTC m=+8156.986964173" lastFinishedPulling="2026-01-29 08:51:08.877090332 +0000 UTC m=+8160.548584889" observedRunningTime="2026-01-29 08:51:09.377917905 +0000 UTC m=+8161.049412492" watchObservedRunningTime="2026-01-29 
08:51:09.389529884 +0000 UTC m=+8161.061024441" Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.602798 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rm6zk"] Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.606772 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.624402 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rm6zk"] Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.718313 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-utilities\") pod \"redhat-operators-rm6zk\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.718688 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-catalog-content\") pod \"redhat-operators-rm6zk\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.718863 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfgz7\" (UniqueName: \"kubernetes.io/projected/d1176912-acea-4cd3-89b3-1fd5d2f6e824-kube-api-access-hfgz7\") pod \"redhat-operators-rm6zk\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.821289 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-catalog-content\") pod \"redhat-operators-rm6zk\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.821379 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfgz7\" (UniqueName: \"kubernetes.io/projected/d1176912-acea-4cd3-89b3-1fd5d2f6e824-kube-api-access-hfgz7\") pod \"redhat-operators-rm6zk\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.821453 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-utilities\") pod \"redhat-operators-rm6zk\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.822006 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-utilities\") pod \"redhat-operators-rm6zk\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.822281 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-catalog-content\") pod \"redhat-operators-rm6zk\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.844928 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfgz7\" (UniqueName: \"kubernetes.io/projected/d1176912-acea-4cd3-89b3-1fd5d2f6e824-kube-api-access-hfgz7\") pod \"redhat-operators-rm6zk\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:13 crc kubenswrapper[4861]: I0129 08:51:13.967848 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:14 crc kubenswrapper[4861]: I0129 08:51:14.098508 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:14 crc kubenswrapper[4861]: I0129 08:51:14.098570 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:14 crc kubenswrapper[4861]: I0129 08:51:14.167858 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:14 crc kubenswrapper[4861]: I0129 08:51:14.457253 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rm6zk"] Jan 29 08:51:14 crc kubenswrapper[4861]: I0129 08:51:14.498781 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:15 crc kubenswrapper[4861]: I0129 08:51:15.448658 4861 generic.go:334] "Generic (PLEG): container finished" podID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerID="697eebae1a4f57d418d57b5857753a4166fae09531a5f9e25efe5d9196c33e99" exitCode=0 Jan 29 08:51:15 crc kubenswrapper[4861]: I0129 08:51:15.448747 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm6zk" event={"ID":"d1176912-acea-4cd3-89b3-1fd5d2f6e824","Type":"ContainerDied","Data":"697eebae1a4f57d418d57b5857753a4166fae09531a5f9e25efe5d9196c33e99"} Jan 29 08:51:15 crc kubenswrapper[4861]: I0129 08:51:15.449045 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm6zk" event={"ID":"d1176912-acea-4cd3-89b3-1fd5d2f6e824","Type":"ContainerStarted","Data":"2ca883fb36fbf6cf39ba5b2071ba4306a338331fe515477c3ac749458ba8f3af"} Jan 29 08:51:16 crc kubenswrapper[4861]: I0129 08:51:16.460549 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm6zk" event={"ID":"d1176912-acea-4cd3-89b3-1fd5d2f6e824","Type":"ContainerStarted","Data":"e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42"} Jan 29 08:51:16 crc kubenswrapper[4861]: I0129 08:51:16.583510 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j484s"] Jan 29 08:51:16 crc kubenswrapper[4861]: I0129 08:51:16.583767 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j484s" podUID="aa95fe3f-f16d-4700-95a5-755b84619eff" containerName="registry-server" containerID="cri-o://4b5a76c04c0335fdbcf8c83f5f250cf0e2b6aa00375950e70fdbafb8dd5abd17" gracePeriod=2 Jan 29 08:51:17 crc kubenswrapper[4861]: I0129 08:51:17.495300 
4861 generic.go:334] "Generic (PLEG): container finished" podID="aa95fe3f-f16d-4700-95a5-755b84619eff" containerID="4b5a76c04c0335fdbcf8c83f5f250cf0e2b6aa00375950e70fdbafb8dd5abd17" exitCode=0 Jan 29 08:51:17 crc kubenswrapper[4861]: I0129 08:51:17.495386 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j484s" event={"ID":"aa95fe3f-f16d-4700-95a5-755b84619eff","Type":"ContainerDied","Data":"4b5a76c04c0335fdbcf8c83f5f250cf0e2b6aa00375950e70fdbafb8dd5abd17"} Jan 29 08:51:17 crc kubenswrapper[4861]: I0129 08:51:17.784036 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:17 crc kubenswrapper[4861]: I0129 08:51:17.817154 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-catalog-content\") pod \"aa95fe3f-f16d-4700-95a5-755b84619eff\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " Jan 29 08:51:17 crc kubenswrapper[4861]: I0129 08:51:17.817222 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26sf5\" (UniqueName: \"kubernetes.io/projected/aa95fe3f-f16d-4700-95a5-755b84619eff-kube-api-access-26sf5\") pod \"aa95fe3f-f16d-4700-95a5-755b84619eff\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " Jan 29 08:51:17 crc kubenswrapper[4861]: I0129 08:51:17.817248 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-utilities\") pod \"aa95fe3f-f16d-4700-95a5-755b84619eff\" (UID: \"aa95fe3f-f16d-4700-95a5-755b84619eff\") " Jan 29 08:51:17 crc kubenswrapper[4861]: I0129 08:51:17.818676 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-utilities" (OuterVolumeSpecName: "utilities") pod "aa95fe3f-f16d-4700-95a5-755b84619eff" (UID: "aa95fe3f-f16d-4700-95a5-755b84619eff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:51:17 crc kubenswrapper[4861]: I0129 08:51:17.825187 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa95fe3f-f16d-4700-95a5-755b84619eff-kube-api-access-26sf5" (OuterVolumeSpecName: "kube-api-access-26sf5") pod "aa95fe3f-f16d-4700-95a5-755b84619eff" (UID: "aa95fe3f-f16d-4700-95a5-755b84619eff"). InnerVolumeSpecName "kube-api-access-26sf5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:51:17 crc kubenswrapper[4861]: I0129 08:51:17.918998 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26sf5\" (UniqueName: \"kubernetes.io/projected/aa95fe3f-f16d-4700-95a5-755b84619eff-kube-api-access-26sf5\") on node \"crc\" DevicePath \"\"" Jan 29 08:51:17 crc kubenswrapper[4861]: I0129 08:51:17.919254 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:51:18 crc kubenswrapper[4861]: I0129 08:51:18.057486 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aa95fe3f-f16d-4700-95a5-755b84619eff" (UID: "aa95fe3f-f16d-4700-95a5-755b84619eff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:51:18 crc kubenswrapper[4861]: I0129 08:51:18.122714 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa95fe3f-f16d-4700-95a5-755b84619eff-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:51:18 crc kubenswrapper[4861]: I0129 08:51:18.516314 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j484s" event={"ID":"aa95fe3f-f16d-4700-95a5-755b84619eff","Type":"ContainerDied","Data":"6149ce22be6948aa2a634f74dc461d4fa83f4f8fef898151eec3332fa632a67d"} Jan 29 08:51:18 crc kubenswrapper[4861]: I0129 08:51:18.516361 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j484s" Jan 29 08:51:18 crc kubenswrapper[4861]: I0129 08:51:18.516388 4861 scope.go:117] "RemoveContainer" containerID="4b5a76c04c0335fdbcf8c83f5f250cf0e2b6aa00375950e70fdbafb8dd5abd17" Jan 29 08:51:18 crc kubenswrapper[4861]: I0129 08:51:18.548914 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j484s"] Jan 29 08:51:18 crc kubenswrapper[4861]: I0129 08:51:18.550683 4861 scope.go:117] "RemoveContainer" containerID="c9206cc091274ae71c0296c6cae33390d60f59e6d6cde31043ca28a4cbd4d614" Jan 29 08:51:18 crc kubenswrapper[4861]: I0129 08:51:18.557507 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j484s"] Jan 29 08:51:18 crc kubenswrapper[4861]: I0129 08:51:18.587845 4861 scope.go:117] "RemoveContainer" containerID="d5167776553ab91f65aca548c7ad4ad0d8263ece7146bb5bdc3d4f5212d2b6fd" Jan 29 08:51:19 crc kubenswrapper[4861]: I0129 08:51:19.128878 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa95fe3f-f16d-4700-95a5-755b84619eff" path="/var/lib/kubelet/pods/aa95fe3f-f16d-4700-95a5-755b84619eff/volumes" Jan 29 08:51:22 crc kubenswrapper[4861]: I0129 08:51:22.567615 4861 generic.go:334] "Generic (PLEG): container finished" podID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerID="e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42" exitCode=0 Jan 29 08:51:22 crc kubenswrapper[4861]: I0129 08:51:22.567765 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm6zk" event={"ID":"d1176912-acea-4cd3-89b3-1fd5d2f6e824","Type":"ContainerDied","Data":"e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42"} Jan 29 08:51:23 
crc kubenswrapper[4861]: I0129 08:51:23.578541 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm6zk" event={"ID":"d1176912-acea-4cd3-89b3-1fd5d2f6e824","Type":"ContainerStarted","Data":"1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e"} Jan 29 08:51:23 crc kubenswrapper[4861]: I0129 08:51:23.613400 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rm6zk" podStartSLOduration=3.002025569 podStartE2EDuration="10.613379115s" podCreationTimestamp="2026-01-29 08:51:13 +0000 UTC" firstStartedPulling="2026-01-29 08:51:15.450685402 +0000 UTC m=+8167.122179959" lastFinishedPulling="2026-01-29 08:51:23.062038948 +0000 UTC m=+8174.733533505" observedRunningTime="2026-01-29 08:51:23.60380841 +0000 UTC m=+8175.275302977" watchObservedRunningTime="2026-01-29 08:51:23.613379115 +0000 UTC m=+8175.284873672" Jan 29 08:51:23 crc kubenswrapper[4861]: I0129 08:51:23.968983 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:23 crc kubenswrapper[4861]: I0129 08:51:23.969337 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:25 crc kubenswrapper[4861]: I0129 08:51:25.051488 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rm6zk" podUID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerName="registry-server" probeResult="failure" output=< Jan 29 08:51:25 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 08:51:25 crc kubenswrapper[4861]: > Jan 29 08:51:35 crc kubenswrapper[4861]: I0129 08:51:35.018008 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rm6zk" podUID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerName="registry-server" probeResult="failure" output=< Jan 29 08:51:35 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 08:51:35 crc kubenswrapper[4861]: > Jan 29 08:51:44 crc kubenswrapper[4861]: I0129 08:51:44.018280 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:44 crc kubenswrapper[4861]: I0129 08:51:44.076632 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:44 crc kubenswrapper[4861]: I0129 08:51:44.797554 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rm6zk"] Jan 29 08:51:45 crc kubenswrapper[4861]: I0129 08:51:45.791148 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rm6zk" podUID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerName="registry-server" containerID="cri-o://1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e" gracePeriod=2 Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.398980 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.592397 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-catalog-content\") pod \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.592734 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfgz7\" (UniqueName: \"kubernetes.io/projected/d1176912-acea-4cd3-89b3-1fd5d2f6e824-kube-api-access-hfgz7\") pod \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.592886 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-utilities\") pod \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\" (UID: \"d1176912-acea-4cd3-89b3-1fd5d2f6e824\") " Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.593376 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-utilities" (OuterVolumeSpecName: "utilities") pod "d1176912-acea-4cd3-89b3-1fd5d2f6e824" (UID: "d1176912-acea-4cd3-89b3-1fd5d2f6e824"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.593737 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.615229 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1176912-acea-4cd3-89b3-1fd5d2f6e824-kube-api-access-hfgz7" (OuterVolumeSpecName: "kube-api-access-hfgz7") pod "d1176912-acea-4cd3-89b3-1fd5d2f6e824" (UID: "d1176912-acea-4cd3-89b3-1fd5d2f6e824"). InnerVolumeSpecName "kube-api-access-hfgz7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.695175 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfgz7\" (UniqueName: \"kubernetes.io/projected/d1176912-acea-4cd3-89b3-1fd5d2f6e824-kube-api-access-hfgz7\") on node \"crc\" DevicePath \"\"" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.710759 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d1176912-acea-4cd3-89b3-1fd5d2f6e824" (UID: "d1176912-acea-4cd3-89b3-1fd5d2f6e824"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.798126 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1176912-acea-4cd3-89b3-1fd5d2f6e824-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.803263 4861 generic.go:334] "Generic (PLEG): container finished" podID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerID="1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e" exitCode=0 Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.803304 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm6zk" event={"ID":"d1176912-acea-4cd3-89b3-1fd5d2f6e824","Type":"ContainerDied","Data":"1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e"} Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.803336 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm6zk" event={"ID":"d1176912-acea-4cd3-89b3-1fd5d2f6e824","Type":"ContainerDied","Data":"2ca883fb36fbf6cf39ba5b2071ba4306a338331fe515477c3ac749458ba8f3af"} Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.803358 4861 scope.go:117] "RemoveContainer" containerID="1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.803332 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rm6zk" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.837625 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rm6zk"] Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.841198 4861 scope.go:117] "RemoveContainer" containerID="e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.847042 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rm6zk"] Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.870573 4861 scope.go:117] "RemoveContainer" containerID="697eebae1a4f57d418d57b5857753a4166fae09531a5f9e25efe5d9196c33e99" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.924842 4861 scope.go:117] "RemoveContainer" containerID="1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e" Jan 29 08:51:46 crc kubenswrapper[4861]: E0129 08:51:46.925513 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e\": container with ID starting with 1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e not found: ID does not exist" containerID="1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.925566 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e"} err="failed to get container status \"1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e\": rpc error: code = NotFound desc = could not find container \"1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e\": container with ID starting with 1fbe3e7b8b2029b1d25882e8f020cbeb95ef6cc89910511c218d54b4dcf0a54e not found: ID does not exist" Jan 29 08:51:46 crc 
kubenswrapper[4861]: I0129 08:51:46.925593 4861 scope.go:117] "RemoveContainer" containerID="e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42" Jan 29 08:51:46 crc kubenswrapper[4861]: E0129 08:51:46.925914 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42\": container with ID starting with e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42 not found: ID does not exist" containerID="e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.925949 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42"} err="failed to get container status \"e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42\": rpc error: code = NotFound desc = could not find container \"e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42\": container with ID starting with e8d5704243829c4a9756adb8707aea2a50efd0b2c203731246ce010035354d42 not found: ID does not exist" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.925976 4861 scope.go:117] "RemoveContainer" containerID="697eebae1a4f57d418d57b5857753a4166fae09531a5f9e25efe5d9196c33e99" Jan 29 08:51:46 crc kubenswrapper[4861]: E0129 08:51:46.926342 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"697eebae1a4f57d418d57b5857753a4166fae09531a5f9e25efe5d9196c33e99\": container with ID starting with 697eebae1a4f57d418d57b5857753a4166fae09531a5f9e25efe5d9196c33e99 not found: ID does not exist" containerID="697eebae1a4f57d418d57b5857753a4166fae09531a5f9e25efe5d9196c33e99" Jan 29 08:51:46 crc kubenswrapper[4861]: I0129 08:51:46.926389 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"697eebae1a4f57d418d57b5857753a4166fae09531a5f9e25efe5d9196c33e99"} err="failed to get container status \"697eebae1a4f57d418d57b5857753a4166fae09531a5f9e25efe5d9196c33e99\": rpc error: code = NotFound desc = could not find container \"697eebae1a4f57d418d57b5857753a4166fae09531a5f9e25efe5d9196c33e99\": container with ID starting with 697eebae1a4f57d418d57b5857753a4166fae09531a5f9e25efe5d9196c33e99 not found: ID does not exist" Jan 29 08:51:47 crc kubenswrapper[4861]: I0129 08:51:47.127753 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" path="/var/lib/kubelet/pods/d1176912-acea-4cd3-89b3-1fd5d2f6e824/volumes" Jan 29 08:52:00 crc kubenswrapper[4861]: I0129 08:52:00.629734 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:52:00 crc kubenswrapper[4861]: I0129 08:52:00.630322 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:52:17 crc kubenswrapper[4861]: I0129 08:52:17.121975 4861 generic.go:334] "Generic (PLEG): 
container finished" podID="ef737a6a-1c77-460c-9152-952334d3ede1" containerID="4c8e14eacbc8472a129e795dfe44907dc98d5aeaf1cf19248184dade577302d0" exitCode=0 Jan 29 08:52:17 crc kubenswrapper[4861]: I0129 08:52:17.131336 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" event={"ID":"ef737a6a-1c77-460c-9152-952334d3ede1","Type":"ContainerDied","Data":"4c8e14eacbc8472a129e795dfe44907dc98d5aeaf1cf19248184dade577302d0"} Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.575965 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.701222 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nq89\" (UniqueName: \"kubernetes.io/projected/ef737a6a-1c77-460c-9152-952334d3ede1-kube-api-access-7nq89\") pod \"ef737a6a-1c77-460c-9152-952334d3ede1\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.701415 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-1\") pod \"ef737a6a-1c77-460c-9152-952334d3ede1\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.701435 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-combined-ca-bundle\") pod \"ef737a6a-1c77-460c-9152-952334d3ede1\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.701505 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-ssh-key-openstack-cell1\") pod \"ef737a6a-1c77-460c-9152-952334d3ede1\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.701534 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-inventory\") pod \"ef737a6a-1c77-460c-9152-952334d3ede1\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.701566 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-0\") pod \"ef737a6a-1c77-460c-9152-952334d3ede1\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.701608 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-0\") pod \"ef737a6a-1c77-460c-9152-952334d3ede1\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.701658 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-1\") pod 
\"ef737a6a-1c77-460c-9152-952334d3ede1\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.701681 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cells-global-config-0\") pod \"ef737a6a-1c77-460c-9152-952334d3ede1\" (UID: \"ef737a6a-1c77-460c-9152-952334d3ede1\") " Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.708377 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef737a6a-1c77-460c-9152-952334d3ede1-kube-api-access-7nq89" (OuterVolumeSpecName: "kube-api-access-7nq89") pod "ef737a6a-1c77-460c-9152-952334d3ede1" (UID: "ef737a6a-1c77-460c-9152-952334d3ede1"). InnerVolumeSpecName "kube-api-access-7nq89". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.715555 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "ef737a6a-1c77-460c-9152-952334d3ede1" (UID: "ef737a6a-1c77-460c-9152-952334d3ede1"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.733991 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-inventory" (OuterVolumeSpecName: "inventory") pod "ef737a6a-1c77-460c-9152-952334d3ede1" (UID: "ef737a6a-1c77-460c-9152-952334d3ede1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.734816 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "ef737a6a-1c77-460c-9152-952334d3ede1" (UID: "ef737a6a-1c77-460c-9152-952334d3ede1"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.740401 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "ef737a6a-1c77-460c-9152-952334d3ede1" (UID: "ef737a6a-1c77-460c-9152-952334d3ede1"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.741811 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "ef737a6a-1c77-460c-9152-952334d3ede1" (UID: "ef737a6a-1c77-460c-9152-952334d3ede1"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.744734 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "ef737a6a-1c77-460c-9152-952334d3ede1" (UID: "ef737a6a-1c77-460c-9152-952334d3ede1"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.752995 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "ef737a6a-1c77-460c-9152-952334d3ede1" (UID: "ef737a6a-1c77-460c-9152-952334d3ede1"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.759181 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "ef737a6a-1c77-460c-9152-952334d3ede1" (UID: "ef737a6a-1c77-460c-9152-952334d3ede1"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.805029 4861 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.805522 4861 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.805620 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.805697 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.805770 4861 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.805843 4861 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.805951 4861 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ef737a6a-1c77-460c-9152-952334d3ede1-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.806025 4861 reconciler_common.go:293] 
"Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ef737a6a-1c77-460c-9152-952334d3ede1-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:52:18 crc kubenswrapper[4861]: I0129 08:52:18.806138 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nq89\" (UniqueName: \"kubernetes.io/projected/ef737a6a-1c77-460c-9152-952334d3ede1-kube-api-access-7nq89\") on node \"crc\" DevicePath \"\"" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.150178 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" event={"ID":"ef737a6a-1c77-460c-9152-952334d3ede1","Type":"ContainerDied","Data":"468bf94eda8e30ac2f5983f61b38a11a578170e7bf32112eb60c16c22258d892"} Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.150220 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-vkvwl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.150226 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="468bf94eda8e30ac2f5983f61b38a11a578170e7bf32112eb60c16c22258d892" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.235640 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-4r8cl"] Jan 29 08:52:19 crc kubenswrapper[4861]: E0129 08:52:19.237289 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa95fe3f-f16d-4700-95a5-755b84619eff" containerName="extract-utilities" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.237402 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa95fe3f-f16d-4700-95a5-755b84619eff" containerName="extract-utilities" Jan 29 08:52:19 crc kubenswrapper[4861]: E0129 08:52:19.237495 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef737a6a-1c77-460c-9152-952334d3ede1" containerName="nova-cell1-openstack-openstack-cell1" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.237594 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef737a6a-1c77-460c-9152-952334d3ede1" containerName="nova-cell1-openstack-openstack-cell1" Jan 29 08:52:19 crc kubenswrapper[4861]: E0129 08:52:19.237679 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerName="extract-content" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.237749 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerName="extract-content" Jan 29 08:52:19 crc kubenswrapper[4861]: E0129 08:52:19.237823 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerName="extract-utilities" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.237889 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerName="extract-utilities" Jan 29 08:52:19 crc kubenswrapper[4861]: E0129 08:52:19.237971 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa95fe3f-f16d-4700-95a5-755b84619eff" containerName="registry-server" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.238036 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa95fe3f-f16d-4700-95a5-755b84619eff" containerName="registry-server" Jan 29 08:52:19 crc kubenswrapper[4861]: E0129 08:52:19.238196 4861 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerName="registry-server" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.238265 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerName="registry-server" Jan 29 08:52:19 crc kubenswrapper[4861]: E0129 08:52:19.238343 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa95fe3f-f16d-4700-95a5-755b84619eff" containerName="extract-content" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.238422 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa95fe3f-f16d-4700-95a5-755b84619eff" containerName="extract-content" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.238788 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1176912-acea-4cd3-89b3-1fd5d2f6e824" containerName="registry-server" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.238916 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef737a6a-1c77-460c-9152-952334d3ede1" containerName="nova-cell1-openstack-openstack-cell1" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.239003 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa95fe3f-f16d-4700-95a5-755b84619eff" containerName="registry-server" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.240163 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.242279 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.242281 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.242284 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.245479 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.245816 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.252635 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-4r8cl"] Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.316761 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-inventory\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.317191 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.317279 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.317423 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.317489 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ssh-key-openstack-cell1\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.317571 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.317611 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9b56b\" (UniqueName: \"kubernetes.io/projected/146988b7-5ac0-4634-a55a-0e66bb5a624e-kube-api-access-9b56b\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.420378 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.420835 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.421145 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc 
kubenswrapper[4861]: I0129 08:52:19.421366 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ssh-key-openstack-cell1\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.421560 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.421695 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9b56b\" (UniqueName: \"kubernetes.io/projected/146988b7-5ac0-4634-a55a-0e66bb5a624e-kube-api-access-9b56b\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.421856 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-inventory\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.426811 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.427059 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-inventory\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.427323 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.427998 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.428318 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ssh-key-openstack-cell1\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.431403 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.440108 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9b56b\" (UniqueName: \"kubernetes.io/projected/146988b7-5ac0-4634-a55a-0e66bb5a624e-kube-api-access-9b56b\") pod \"telemetry-openstack-openstack-cell1-4r8cl\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:19 crc kubenswrapper[4861]: I0129 08:52:19.566522 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:52:20 crc kubenswrapper[4861]: I0129 08:52:20.141866 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-4r8cl"] Jan 29 08:52:20 crc kubenswrapper[4861]: I0129 08:52:20.164376 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" event={"ID":"146988b7-5ac0-4634-a55a-0e66bb5a624e","Type":"ContainerStarted","Data":"1ba7c1d39eb1bb3c96dc504a651cd6fa1809b19d861e7dc2b9066a234f1b8abe"} Jan 29 08:52:21 crc kubenswrapper[4861]: I0129 08:52:21.174360 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" event={"ID":"146988b7-5ac0-4634-a55a-0e66bb5a624e","Type":"ContainerStarted","Data":"9322e81ca746fe4ccadc2995b86e7c582697a7e98f1d96a748c4ff46ef6fcb91"} Jan 29 08:52:21 crc kubenswrapper[4861]: I0129 08:52:21.193500 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" podStartSLOduration=1.795829875 podStartE2EDuration="2.193482114s" podCreationTimestamp="2026-01-29 08:52:19 +0000 UTC" firstStartedPulling="2026-01-29 08:52:20.139235609 +0000 UTC m=+8231.810730166" lastFinishedPulling="2026-01-29 08:52:20.536887848 +0000 UTC m=+8232.208382405" observedRunningTime="2026-01-29 08:52:21.192547819 +0000 UTC m=+8232.864042396" watchObservedRunningTime="2026-01-29 08:52:21.193482114 +0000 UTC m=+8232.864976661" Jan 29 08:52:30 crc kubenswrapper[4861]: I0129 08:52:30.629436 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:52:30 crc kubenswrapper[4861]: I0129 08:52:30.630087 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" Jan 29 08:53:00 crc kubenswrapper[4861]: I0129 08:53:00.629862 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:53:00 crc kubenswrapper[4861]: I0129 08:53:00.630391 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:53:00 crc kubenswrapper[4861]: I0129 08:53:00.630435 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 08:53:00 crc kubenswrapper[4861]: I0129 08:53:00.631189 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"87606b0fe7e00e9d70953c778824d4c6bef40fc9cbeb821b1f5c869e6ae6b8a4"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 08:53:00 crc kubenswrapper[4861]: I0129 08:53:00.631238 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://87606b0fe7e00e9d70953c778824d4c6bef40fc9cbeb821b1f5c869e6ae6b8a4" gracePeriod=600 Jan 29 08:53:01 crc kubenswrapper[4861]: I0129 08:53:01.550929 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="87606b0fe7e00e9d70953c778824d4c6bef40fc9cbeb821b1f5c869e6ae6b8a4" exitCode=0 Jan 29 08:53:01 crc kubenswrapper[4861]: I0129 08:53:01.551015 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"87606b0fe7e00e9d70953c778824d4c6bef40fc9cbeb821b1f5c869e6ae6b8a4"} Jan 29 08:53:01 crc kubenswrapper[4861]: I0129 08:53:01.551431 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"} Jan 29 08:53:01 crc kubenswrapper[4861]: I0129 08:53:01.551464 4861 scope.go:117] "RemoveContainer" containerID="3dd631e125ca396fc733f6e05a9085f8494cd8aff3725b3a7ff729fcf706c8af" Jan 29 08:55:00 crc kubenswrapper[4861]: I0129 08:55:00.630481 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:55:00 crc kubenswrapper[4861]: I0129 08:55:00.631194 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:55:30 crc kubenswrapper[4861]: I0129 08:55:30.630274 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:55:30 crc kubenswrapper[4861]: I0129 08:55:30.630820 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:55:30 crc kubenswrapper[4861]: I0129 08:55:30.951461 4861 generic.go:334] "Generic (PLEG): container finished" podID="146988b7-5ac0-4634-a55a-0e66bb5a624e" containerID="9322e81ca746fe4ccadc2995b86e7c582697a7e98f1d96a748c4ff46ef6fcb91" exitCode=0 Jan 29 08:55:30 crc kubenswrapper[4861]: I0129 08:55:30.951571 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" event={"ID":"146988b7-5ac0-4634-a55a-0e66bb5a624e","Type":"ContainerDied","Data":"9322e81ca746fe4ccadc2995b86e7c582697a7e98f1d96a748c4ff46ef6fcb91"} Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.426170 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.558167 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9b56b\" (UniqueName: \"kubernetes.io/projected/146988b7-5ac0-4634-a55a-0e66bb5a624e-kube-api-access-9b56b\") pod \"146988b7-5ac0-4634-a55a-0e66bb5a624e\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.559216 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-inventory\") pod \"146988b7-5ac0-4634-a55a-0e66bb5a624e\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.559274 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-1\") pod \"146988b7-5ac0-4634-a55a-0e66bb5a624e\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.559368 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-2\") pod \"146988b7-5ac0-4634-a55a-0e66bb5a624e\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.559405 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ssh-key-openstack-cell1\") pod \"146988b7-5ac0-4634-a55a-0e66bb5a624e\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.559431 4861 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-0\") pod \"146988b7-5ac0-4634-a55a-0e66bb5a624e\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.559495 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-telemetry-combined-ca-bundle\") pod \"146988b7-5ac0-4634-a55a-0e66bb5a624e\" (UID: \"146988b7-5ac0-4634-a55a-0e66bb5a624e\") " Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.564500 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/146988b7-5ac0-4634-a55a-0e66bb5a624e-kube-api-access-9b56b" (OuterVolumeSpecName: "kube-api-access-9b56b") pod "146988b7-5ac0-4634-a55a-0e66bb5a624e" (UID: "146988b7-5ac0-4634-a55a-0e66bb5a624e"). InnerVolumeSpecName "kube-api-access-9b56b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.565251 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "146988b7-5ac0-4634-a55a-0e66bb5a624e" (UID: "146988b7-5ac0-4634-a55a-0e66bb5a624e"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.590634 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-inventory" (OuterVolumeSpecName: "inventory") pod "146988b7-5ac0-4634-a55a-0e66bb5a624e" (UID: "146988b7-5ac0-4634-a55a-0e66bb5a624e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.591558 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "146988b7-5ac0-4634-a55a-0e66bb5a624e" (UID: "146988b7-5ac0-4634-a55a-0e66bb5a624e"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.594888 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "146988b7-5ac0-4634-a55a-0e66bb5a624e" (UID: "146988b7-5ac0-4634-a55a-0e66bb5a624e"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.596622 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "146988b7-5ac0-4634-a55a-0e66bb5a624e" (UID: "146988b7-5ac0-4634-a55a-0e66bb5a624e"). InnerVolumeSpecName "ceilometer-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.602407 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "146988b7-5ac0-4634-a55a-0e66bb5a624e" (UID: "146988b7-5ac0-4634-a55a-0e66bb5a624e"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.663096 4861 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.663140 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.663151 4861 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.663161 4861 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.663175 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9b56b\" (UniqueName: \"kubernetes.io/projected/146988b7-5ac0-4634-a55a-0e66bb5a624e-kube-api-access-9b56b\") on node \"crc\" DevicePath \"\"" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.663187 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.663199 4861 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/146988b7-5ac0-4634-a55a-0e66bb5a624e-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.973781 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" event={"ID":"146988b7-5ac0-4634-a55a-0e66bb5a624e","Type":"ContainerDied","Data":"1ba7c1d39eb1bb3c96dc504a651cd6fa1809b19d861e7dc2b9066a234f1b8abe"} Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.974169 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ba7c1d39eb1bb3c96dc504a651cd6fa1809b19d861e7dc2b9066a234f1b8abe" Jan 29 08:55:32 crc kubenswrapper[4861]: I0129 08:55:32.973936 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-4r8cl" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.081265 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-txpmz"] Jan 29 08:55:33 crc kubenswrapper[4861]: E0129 08:55:33.081702 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="146988b7-5ac0-4634-a55a-0e66bb5a624e" containerName="telemetry-openstack-openstack-cell1" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.081718 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="146988b7-5ac0-4634-a55a-0e66bb5a624e" containerName="telemetry-openstack-openstack-cell1" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.081907 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="146988b7-5ac0-4634-a55a-0e66bb5a624e" containerName="telemetry-openstack-openstack-cell1" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.082632 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.087561 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.088127 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.088519 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.089667 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-sriov-agent-neutron-config" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.089667 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.096287 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-txpmz"] Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.176490 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.176624 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjmkt\" (UniqueName: \"kubernetes.io/projected/607f0b15-73a6-4554-a410-30135f075145-kube-api-access-fjmkt\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.176648 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " 
pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.176705 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.176756 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-ssh-key-openstack-cell1\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.278873 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.279303 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjmkt\" (UniqueName: \"kubernetes.io/projected/607f0b15-73a6-4554-a410-30135f075145-kube-api-access-fjmkt\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.279433 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.279580 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.279699 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-ssh-key-openstack-cell1\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.284582 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-ssh-key-openstack-cell1\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " 
pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.284765 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.285366 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.287693 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.297123 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjmkt\" (UniqueName: \"kubernetes.io/projected/607f0b15-73a6-4554-a410-30135f075145-kube-api-access-fjmkt\") pod \"neutron-sriov-openstack-openstack-cell1-txpmz\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.398555 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.920296 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-txpmz"] Jan 29 08:55:33 crc kubenswrapper[4861]: I0129 08:55:33.981675 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" event={"ID":"607f0b15-73a6-4554-a410-30135f075145","Type":"ContainerStarted","Data":"2d13d19d64f0db2484f86fdab55bc0ae40cbfdec0a252786b33b9fe65096cffa"} Jan 29 08:55:34 crc kubenswrapper[4861]: I0129 08:55:34.992592 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" event={"ID":"607f0b15-73a6-4554-a410-30135f075145","Type":"ContainerStarted","Data":"71c7ffbe20ba2e3b0d92fb9795b7257734c31aef659f72077fc311e4049d2a38"} Jan 29 08:55:35 crc kubenswrapper[4861]: I0129 08:55:35.021508 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" podStartSLOduration=1.462876684 podStartE2EDuration="2.021486559s" podCreationTimestamp="2026-01-29 08:55:33 +0000 UTC" firstStartedPulling="2026-01-29 08:55:33.929095967 +0000 UTC m=+8425.600590524" lastFinishedPulling="2026-01-29 08:55:34.487705832 +0000 UTC m=+8426.159200399" observedRunningTime="2026-01-29 08:55:35.017151515 +0000 UTC m=+8426.688646082" watchObservedRunningTime="2026-01-29 08:55:35.021486559 +0000 UTC m=+8426.692981126" Jan 29 08:56:00 crc kubenswrapper[4861]: I0129 08:56:00.629641 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 08:56:00 crc kubenswrapper[4861]: I0129 08:56:00.630217 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 08:56:00 crc kubenswrapper[4861]: I0129 08:56:00.630263 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 08:56:00 crc kubenswrapper[4861]: I0129 08:56:00.631118 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 08:56:00 crc kubenswrapper[4861]: I0129 08:56:00.631162 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" gracePeriod=600 Jan 29 08:56:00 crc kubenswrapper[4861]: E0129 08:56:00.765865 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:56:01 crc kubenswrapper[4861]: I0129 08:56:01.245285 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" exitCode=0 Jan 29 08:56:01 crc kubenswrapper[4861]: I0129 08:56:01.245325 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"} Jan 29 08:56:01 crc kubenswrapper[4861]: I0129 08:56:01.245356 4861 scope.go:117] "RemoveContainer" containerID="87606b0fe7e00e9d70953c778824d4c6bef40fc9cbeb821b1f5c869e6ae6b8a4" Jan 29 08:56:01 crc kubenswrapper[4861]: I0129 08:56:01.245988 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 08:56:01 crc kubenswrapper[4861]: E0129 08:56:01.246276 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:56:15 crc kubenswrapper[4861]: I0129 08:56:15.117335 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 08:56:15 crc kubenswrapper[4861]: E0129 08:56:15.118140 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:56:28 crc kubenswrapper[4861]: I0129 08:56:28.117108 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 08:56:28 crc kubenswrapper[4861]: E0129 08:56:28.117836 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:56:33 crc kubenswrapper[4861]: I0129 08:56:33.549165 4861 generic.go:334] "Generic (PLEG): container finished" podID="607f0b15-73a6-4554-a410-30135f075145" containerID="71c7ffbe20ba2e3b0d92fb9795b7257734c31aef659f72077fc311e4049d2a38" exitCode=0 Jan 29 08:56:33 crc kubenswrapper[4861]: I0129 08:56:33.549285 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" 
event={"ID":"607f0b15-73a6-4554-a410-30135f075145","Type":"ContainerDied","Data":"71c7ffbe20ba2e3b0d92fb9795b7257734c31aef659f72077fc311e4049d2a38"} Jan 29 08:56:34 crc kubenswrapper[4861]: I0129 08:56:34.984343 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.044985 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-ssh-key-openstack-cell1\") pod \"607f0b15-73a6-4554-a410-30135f075145\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.045053 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-inventory\") pod \"607f0b15-73a6-4554-a410-30135f075145\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.045278 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-agent-neutron-config-0\") pod \"607f0b15-73a6-4554-a410-30135f075145\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.046263 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-combined-ca-bundle\") pod \"607f0b15-73a6-4554-a410-30135f075145\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.046984 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjmkt\" (UniqueName: \"kubernetes.io/projected/607f0b15-73a6-4554-a410-30135f075145-kube-api-access-fjmkt\") pod \"607f0b15-73a6-4554-a410-30135f075145\" (UID: \"607f0b15-73a6-4554-a410-30135f075145\") " Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.052812 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "607f0b15-73a6-4554-a410-30135f075145" (UID: "607f0b15-73a6-4554-a410-30135f075145"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.052932 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/607f0b15-73a6-4554-a410-30135f075145-kube-api-access-fjmkt" (OuterVolumeSpecName: "kube-api-access-fjmkt") pod "607f0b15-73a6-4554-a410-30135f075145" (UID: "607f0b15-73a6-4554-a410-30135f075145"). InnerVolumeSpecName "kube-api-access-fjmkt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.078466 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-sriov-agent-neutron-config-0") pod "607f0b15-73a6-4554-a410-30135f075145" (UID: "607f0b15-73a6-4554-a410-30135f075145"). InnerVolumeSpecName "neutron-sriov-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.079219 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-inventory" (OuterVolumeSpecName: "inventory") pod "607f0b15-73a6-4554-a410-30135f075145" (UID: "607f0b15-73a6-4554-a410-30135f075145"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.079817 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "607f0b15-73a6-4554-a410-30135f075145" (UID: "607f0b15-73a6-4554-a410-30135f075145"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.150770 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.150821 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.150833 4861 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.150842 4861 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/607f0b15-73a6-4554-a410-30135f075145-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.150852 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjmkt\" (UniqueName: \"kubernetes.io/projected/607f0b15-73a6-4554-a410-30135f075145-kube-api-access-fjmkt\") on node \"crc\" DevicePath \"\"" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.578453 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" event={"ID":"607f0b15-73a6-4554-a410-30135f075145","Type":"ContainerDied","Data":"2d13d19d64f0db2484f86fdab55bc0ae40cbfdec0a252786b33b9fe65096cffa"} Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.578543 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d13d19d64f0db2484f86fdab55bc0ae40cbfdec0a252786b33b9fe65096cffa" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.578627 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-txpmz" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.771973 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb"] Jan 29 08:56:35 crc kubenswrapper[4861]: E0129 08:56:35.772632 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="607f0b15-73a6-4554-a410-30135f075145" containerName="neutron-sriov-openstack-openstack-cell1" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.772655 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="607f0b15-73a6-4554-a410-30135f075145" containerName="neutron-sriov-openstack-openstack-cell1" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.772907 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="607f0b15-73a6-4554-a410-30135f075145" containerName="neutron-sriov-openstack-openstack-cell1" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.773850 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.777783 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-dhcp-agent-neutron-config" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.777983 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.778140 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.779619 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.784531 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.794511 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb"] Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.873841 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.873901 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.873939 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-ssh-key-openstack-cell1\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 
08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.874435 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.874862 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl9xn\" (UniqueName: \"kubernetes.io/projected/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-kube-api-access-rl9xn\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.977202 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.977546 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.977595 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-ssh-key-openstack-cell1\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.977723 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.977813 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl9xn\" (UniqueName: \"kubernetes.io/projected/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-kube-api-access-rl9xn\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.981907 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc 
kubenswrapper[4861]: I0129 08:56:35.982499 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.982828 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-ssh-key-openstack-cell1\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.984590 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:35 crc kubenswrapper[4861]: I0129 08:56:35.997029 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl9xn\" (UniqueName: \"kubernetes.io/projected/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-kube-api-access-rl9xn\") pod \"neutron-dhcp-openstack-openstack-cell1-qvqjb\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:36 crc kubenswrapper[4861]: I0129 08:56:36.106750 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" Jan 29 08:56:36 crc kubenswrapper[4861]: I0129 08:56:36.676729 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb"] Jan 29 08:56:36 crc kubenswrapper[4861]: I0129 08:56:36.680951 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 08:56:37 crc kubenswrapper[4861]: I0129 08:56:37.603056 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" event={"ID":"c90d0f7b-c4ef-4352-aef0-3f038ec6a922","Type":"ContainerStarted","Data":"8d7d64cbbe15f0ec4c2eb9cff5df34776eca2ee8fe8211b3de1cd995ce8956a1"} Jan 29 08:56:38 crc kubenswrapper[4861]: I0129 08:56:38.613686 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" event={"ID":"c90d0f7b-c4ef-4352-aef0-3f038ec6a922","Type":"ContainerStarted","Data":"8d6d6721ac9d27e12544c9e04a9737d8aab0bfaafc5b4222dac474e226bf15db"} Jan 29 08:56:38 crc kubenswrapper[4861]: I0129 08:56:38.634317 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" podStartSLOduration=2.8325660790000002 podStartE2EDuration="3.634296693s" podCreationTimestamp="2026-01-29 08:56:35 +0000 UTC" firstStartedPulling="2026-01-29 08:56:36.680640283 +0000 UTC m=+8488.352134840" lastFinishedPulling="2026-01-29 08:56:37.482370897 +0000 UTC m=+8489.153865454" observedRunningTime="2026-01-29 08:56:38.626873746 +0000 UTC m=+8490.298368303" watchObservedRunningTime="2026-01-29 08:56:38.634296693 +0000 UTC m=+8490.305791250" Jan 29 08:56:39 crc kubenswrapper[4861]: I0129 08:56:39.131450 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 08:56:39 crc kubenswrapper[4861]: E0129 08:56:39.132041 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:56:53 crc kubenswrapper[4861]: I0129 08:56:53.116262 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 08:56:53 crc kubenswrapper[4861]: E0129 08:56:53.117290 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:57:06 crc kubenswrapper[4861]: I0129 08:57:06.116994 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 08:57:06 crc kubenswrapper[4861]: E0129 08:57:06.118622 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Jan 29 08:57:17 crc kubenswrapper[4861]: I0129 08:57:17.118011 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"
Jan 29 08:57:17 crc kubenswrapper[4861]: E0129 08:57:17.118672 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:57:32 crc kubenswrapper[4861]: I0129 08:57:32.116376 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"
Jan 29 08:57:32 crc kubenswrapper[4861]: E0129 08:57:32.117099 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:57:43 crc kubenswrapper[4861]: I0129 08:57:43.116978 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"
Jan 29 08:57:43 crc kubenswrapper[4861]: E0129 08:57:43.117783 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:57:46 crc kubenswrapper[4861]: I0129 08:57:46.262705 4861 generic.go:334] "Generic (PLEG): container finished" podID="c90d0f7b-c4ef-4352-aef0-3f038ec6a922" containerID="8d6d6721ac9d27e12544c9e04a9737d8aab0bfaafc5b4222dac474e226bf15db" exitCode=0
Jan 29 08:57:46 crc kubenswrapper[4861]: I0129 08:57:46.262792 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" event={"ID":"c90d0f7b-c4ef-4352-aef0-3f038ec6a922","Type":"ContainerDied","Data":"8d6d6721ac9d27e12544c9e04a9737d8aab0bfaafc5b4222dac474e226bf15db"}
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.709508 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb"
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.816353 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-agent-neutron-config-0\") pod \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") "
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.816504 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-ssh-key-openstack-cell1\") pod \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") "
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.816557 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rl9xn\" (UniqueName: \"kubernetes.io/projected/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-kube-api-access-rl9xn\") pod \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") "
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.816607 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-combined-ca-bundle\") pod \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") "
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.816718 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-inventory\") pod \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\" (UID: \"c90d0f7b-c4ef-4352-aef0-3f038ec6a922\") "
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.822513 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "c90d0f7b-c4ef-4352-aef0-3f038ec6a922" (UID: "c90d0f7b-c4ef-4352-aef0-3f038ec6a922"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.822907 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-kube-api-access-rl9xn" (OuterVolumeSpecName: "kube-api-access-rl9xn") pod "c90d0f7b-c4ef-4352-aef0-3f038ec6a922" (UID: "c90d0f7b-c4ef-4352-aef0-3f038ec6a922"). InnerVolumeSpecName "kube-api-access-rl9xn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.855423 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-inventory" (OuterVolumeSpecName: "inventory") pod "c90d0f7b-c4ef-4352-aef0-3f038ec6a922" (UID: "c90d0f7b-c4ef-4352-aef0-3f038ec6a922"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.861865 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "c90d0f7b-c4ef-4352-aef0-3f038ec6a922" (UID: "c90d0f7b-c4ef-4352-aef0-3f038ec6a922"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.863837 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-dhcp-agent-neutron-config-0") pod "c90d0f7b-c4ef-4352-aef0-3f038ec6a922" (UID: "c90d0f7b-c4ef-4352-aef0-3f038ec6a922"). InnerVolumeSpecName "neutron-dhcp-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.919934 4861 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-agent-neutron-config-0\") on node \"crc\" DevicePath \"\""
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.920186 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\""
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.920274 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rl9xn\" (UniqueName: \"kubernetes.io/projected/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-kube-api-access-rl9xn\") on node \"crc\" DevicePath \"\""
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.920353 4861 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 08:57:47 crc kubenswrapper[4861]: I0129 08:57:47.920432 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c90d0f7b-c4ef-4352-aef0-3f038ec6a922-inventory\") on node \"crc\" DevicePath \"\""
Jan 29 08:57:48 crc kubenswrapper[4861]: I0129 08:57:48.285987 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb" event={"ID":"c90d0f7b-c4ef-4352-aef0-3f038ec6a922","Type":"ContainerDied","Data":"8d7d64cbbe15f0ec4c2eb9cff5df34776eca2ee8fe8211b3de1cd995ce8956a1"}
Jan 29 08:57:48 crc kubenswrapper[4861]: I0129 08:57:48.286312 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d7d64cbbe15f0ec4c2eb9cff5df34776eca2ee8fe8211b3de1cd995ce8956a1"
Jan 29 08:57:48 crc kubenswrapper[4861]: I0129 08:57:48.286016 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-qvqjb"
Jan 29 08:57:53 crc kubenswrapper[4861]: I0129 08:57:53.828141 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 29 08:57:53 crc kubenswrapper[4861]: I0129 08:57:53.831727 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="0f7bff6c-0f2a-4084-be0d-4ec9612e37c5" containerName="nova-cell0-conductor-conductor" containerID="cri-o://7de32205f9a2e36a4c13ff50204796b23961557a0a39dfa34d29db1510f2860a" gracePeriod=30
Jan 29 08:57:53 crc kubenswrapper[4861]: I0129 08:57:53.862425 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 29 08:57:53 crc kubenswrapper[4861]: I0129 08:57:53.862930 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="a83392de-0747-452a-a609-8e4c63b0f13e" containerName="nova-cell1-conductor-conductor" containerID="cri-o://f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f" gracePeriod=30
Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.116547 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"
Jan 29 08:57:54 crc kubenswrapper[4861]: E0129 08:57:54.116829 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.362439 4861 generic.go:334] "Generic (PLEG): container finished" podID="0f7bff6c-0f2a-4084-be0d-4ec9612e37c5" containerID="7de32205f9a2e36a4c13ff50204796b23961557a0a39dfa34d29db1510f2860a" exitCode=0
Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.362491 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5","Type":"ContainerDied","Data":"7de32205f9a2e36a4c13ff50204796b23961557a0a39dfa34d29db1510f2860a"}
Jan 29 08:57:54 crc kubenswrapper[4861]: E0129 08:57:54.766806 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Jan 29 08:57:54 crc kubenswrapper[4861]: E0129 08:57:54.769144 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Jan 29 08:57:54 crc kubenswrapper[4861]: E0129 08:57:54.770350 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 08:57:54 crc kubenswrapper[4861]: E0129 08:57:54.770430 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="a83392de-0747-452a-a609-8e4c63b0f13e" containerName="nova-cell1-conductor-conductor" Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.787864 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.887926 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsmzl\" (UniqueName: \"kubernetes.io/projected/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-kube-api-access-qsmzl\") pod \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\" (UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.888856 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-combined-ca-bundle\") pod \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\" (UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.889475 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-config-data\") pod \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\" (UID: \"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5\") " Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.899038 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-kube-api-access-qsmzl" (OuterVolumeSpecName: "kube-api-access-qsmzl") pod "0f7bff6c-0f2a-4084-be0d-4ec9612e37c5" (UID: "0f7bff6c-0f2a-4084-be0d-4ec9612e37c5"). InnerVolumeSpecName "kube-api-access-qsmzl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.921538 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-config-data" (OuterVolumeSpecName: "config-data") pod "0f7bff6c-0f2a-4084-be0d-4ec9612e37c5" (UID: "0f7bff6c-0f2a-4084-be0d-4ec9612e37c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.923939 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0f7bff6c-0f2a-4084-be0d-4ec9612e37c5" (UID: "0f7bff6c-0f2a-4084-be0d-4ec9612e37c5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.992607 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.992654 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:57:54 crc kubenswrapper[4861]: I0129 08:57:54.992667 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsmzl\" (UniqueName: \"kubernetes.io/projected/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5-kube-api-access-qsmzl\") on node \"crc\" DevicePath \"\"" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.220362 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.222693 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="6775a96e-88ab-471c-9abb-9a981572d243" containerName="nova-scheduler-scheduler" containerID="cri-o://4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501" gracePeriod=30 Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.234642 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.235243 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="14c47dd3-3866-4834-b719-5f8494904ea4" containerName="nova-api-log" containerID="cri-o://8d4806e15389b2f60659d50931356fc913f5b09fbe06a4580139e0adf4ebff4c" gracePeriod=30 Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.235800 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="14c47dd3-3866-4834-b719-5f8494904ea4" containerName="nova-api-api" containerID="cri-o://e894601e5fef0d614b3a9c749f0cd44610e778b1b1c3366627e68905994e0908" gracePeriod=30 Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.251891 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.252201 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-log" containerID="cri-o://a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028" gracePeriod=30 Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.252561 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-metadata" containerID="cri-o://e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70" gracePeriod=30 Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.373686 4861 generic.go:334] "Generic (PLEG): container finished" podID="14c47dd3-3866-4834-b719-5f8494904ea4" containerID="8d4806e15389b2f60659d50931356fc913f5b09fbe06a4580139e0adf4ebff4c" exitCode=143 Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.373801 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"14c47dd3-3866-4834-b719-5f8494904ea4","Type":"ContainerDied","Data":"8d4806e15389b2f60659d50931356fc913f5b09fbe06a4580139e0adf4ebff4c"} Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.375680 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"0f7bff6c-0f2a-4084-be0d-4ec9612e37c5","Type":"ContainerDied","Data":"103ca54a8a41f4a71ab03b7a3166eda366770a40a98e8ef6c2e088ee0b7c2b1a"} Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.375736 4861 scope.go:117] "RemoveContainer" containerID="7de32205f9a2e36a4c13ff50204796b23961557a0a39dfa34d29db1510f2860a" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.375902 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.446344 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.463664 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.477010 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 08:57:55 crc kubenswrapper[4861]: E0129 08:57:55.477659 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f7bff6c-0f2a-4084-be0d-4ec9612e37c5" containerName="nova-cell0-conductor-conductor" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.477684 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f7bff6c-0f2a-4084-be0d-4ec9612e37c5" containerName="nova-cell0-conductor-conductor" Jan 29 08:57:55 crc kubenswrapper[4861]: E0129 08:57:55.477734 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c90d0f7b-c4ef-4352-aef0-3f038ec6a922" containerName="neutron-dhcp-openstack-openstack-cell1" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.477746 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="c90d0f7b-c4ef-4352-aef0-3f038ec6a922" containerName="neutron-dhcp-openstack-openstack-cell1" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.478002 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="c90d0f7b-c4ef-4352-aef0-3f038ec6a922" containerName="neutron-dhcp-openstack-openstack-cell1" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.478023 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f7bff6c-0f2a-4084-be0d-4ec9612e37c5" containerName="nova-cell0-conductor-conductor" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.478931 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.481084 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.488729 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.504292 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b42f984f-e0f7-4e8a-91d4-ca1443d529b1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b42f984f-e0f7-4e8a-91d4-ca1443d529b1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.504679 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b42f984f-e0f7-4e8a-91d4-ca1443d529b1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b42f984f-e0f7-4e8a-91d4-ca1443d529b1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.504834 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd2pj\" (UniqueName: \"kubernetes.io/projected/b42f984f-e0f7-4e8a-91d4-ca1443d529b1-kube-api-access-qd2pj\") pod \"nova-cell0-conductor-0\" (UID: \"b42f984f-e0f7-4e8a-91d4-ca1443d529b1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.606593 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b42f984f-e0f7-4e8a-91d4-ca1443d529b1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b42f984f-e0f7-4e8a-91d4-ca1443d529b1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.606650 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b42f984f-e0f7-4e8a-91d4-ca1443d529b1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b42f984f-e0f7-4e8a-91d4-ca1443d529b1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.606693 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd2pj\" (UniqueName: \"kubernetes.io/projected/b42f984f-e0f7-4e8a-91d4-ca1443d529b1-kube-api-access-qd2pj\") pod \"nova-cell0-conductor-0\" (UID: \"b42f984f-e0f7-4e8a-91d4-ca1443d529b1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.625253 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b42f984f-e0f7-4e8a-91d4-ca1443d529b1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b42f984f-e0f7-4e8a-91d4-ca1443d529b1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.625779 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b42f984f-e0f7-4e8a-91d4-ca1443d529b1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b42f984f-e0f7-4e8a-91d4-ca1443d529b1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.650806 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd2pj\" (UniqueName: \"kubernetes.io/projected/b42f984f-e0f7-4e8a-91d4-ca1443d529b1-kube-api-access-qd2pj\") pod \"nova-cell0-conductor-0\" (UID: \"b42f984f-e0f7-4e8a-91d4-ca1443d529b1\") " pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:55 crc kubenswrapper[4861]: I0129 08:57:55.837871 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:56 crc kubenswrapper[4861]: I0129 08:57:56.407013 4861 generic.go:334] "Generic (PLEG): container finished" podID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerID="a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028" exitCode=143 Jan 29 08:57:56 crc kubenswrapper[4861]: I0129 08:57:56.407149 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310","Type":"ContainerDied","Data":"a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028"} Jan 29 08:57:56 crc kubenswrapper[4861]: W0129 08:57:56.434887 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb42f984f_e0f7_4e8a_91d4_ca1443d529b1.slice/crio-6111f189c435ce83920abdd875ff5aed98a779d9aea9b8cfb22609c9be7b2ea1 WatchSource:0}: Error finding container 6111f189c435ce83920abdd875ff5aed98a779d9aea9b8cfb22609c9be7b2ea1: Status 404 returned error can't find the container with id 6111f189c435ce83920abdd875ff5aed98a779d9aea9b8cfb22609c9be7b2ea1 Jan 29 08:57:56 crc kubenswrapper[4861]: I0129 08:57:56.436888 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.021246 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.129805 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f7bff6c-0f2a-4084-be0d-4ec9612e37c5" path="/var/lib/kubelet/pods/0f7bff6c-0f2a-4084-be0d-4ec9612e37c5/volumes" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.165344 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-combined-ca-bundle\") pod \"a83392de-0747-452a-a609-8e4c63b0f13e\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.165427 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4452h\" (UniqueName: \"kubernetes.io/projected/a83392de-0747-452a-a609-8e4c63b0f13e-kube-api-access-4452h\") pod \"a83392de-0747-452a-a609-8e4c63b0f13e\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.165497 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-config-data\") pod \"a83392de-0747-452a-a609-8e4c63b0f13e\" (UID: \"a83392de-0747-452a-a609-8e4c63b0f13e\") " Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.172101 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a83392de-0747-452a-a609-8e4c63b0f13e-kube-api-access-4452h" (OuterVolumeSpecName: "kube-api-access-4452h") pod "a83392de-0747-452a-a609-8e4c63b0f13e" (UID: "a83392de-0747-452a-a609-8e4c63b0f13e"). InnerVolumeSpecName "kube-api-access-4452h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.202268 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-config-data" (OuterVolumeSpecName: "config-data") pod "a83392de-0747-452a-a609-8e4c63b0f13e" (UID: "a83392de-0747-452a-a609-8e4c63b0f13e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.208440 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a83392de-0747-452a-a609-8e4c63b0f13e" (UID: "a83392de-0747-452a-a609-8e4c63b0f13e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:57:57 crc kubenswrapper[4861]: E0129 08:57:57.210443 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 08:57:57 crc kubenswrapper[4861]: E0129 08:57:57.214325 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 08:57:57 crc kubenswrapper[4861]: E0129 08:57:57.215762 4861 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 08:57:57 crc kubenswrapper[4861]: E0129 08:57:57.215813 4861 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="6775a96e-88ab-471c-9abb-9a981572d243" containerName="nova-scheduler-scheduler" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.270517 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.270554 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4452h\" (UniqueName: \"kubernetes.io/projected/a83392de-0747-452a-a609-8e4c63b0f13e-kube-api-access-4452h\") on node \"crc\" DevicePath \"\"" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.270567 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83392de-0747-452a-a609-8e4c63b0f13e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.429440 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"b42f984f-e0f7-4e8a-91d4-ca1443d529b1","Type":"ContainerStarted","Data":"03276675d393f19f87ec1ec2276b9c0c53c86bb8f32b611f5c429d421c5b0348"} Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.429727 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"b42f984f-e0f7-4e8a-91d4-ca1443d529b1","Type":"ContainerStarted","Data":"6111f189c435ce83920abdd875ff5aed98a779d9aea9b8cfb22609c9be7b2ea1"} Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.431168 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.433136 4861 generic.go:334] "Generic (PLEG): container finished" podID="a83392de-0747-452a-a609-8e4c63b0f13e" containerID="f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f" exitCode=0 Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.433186 4861 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a83392de-0747-452a-a609-8e4c63b0f13e","Type":"ContainerDied","Data":"f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f"} Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.433218 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a83392de-0747-452a-a609-8e4c63b0f13e","Type":"ContainerDied","Data":"7c85ece55532297595402ce13987a386451afd5ec7cc9fea3ad3216ebe962100"} Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.433239 4861 scope.go:117] "RemoveContainer" containerID="f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.433369 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.466610 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.466588668 podStartE2EDuration="2.466588668s" podCreationTimestamp="2026-01-29 08:57:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:57:57.458606037 +0000 UTC m=+8569.130100604" watchObservedRunningTime="2026-01-29 08:57:57.466588668 +0000 UTC m=+8569.138083225" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.531690 4861 scope.go:117] "RemoveContainer" containerID="f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f" Jan 29 08:57:57 crc kubenswrapper[4861]: E0129 08:57:57.532313 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f\": container with ID starting with f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f not found: ID does not exist" containerID="f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.532369 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f"} err="failed to get container status \"f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f\": rpc error: code = NotFound desc = could not find container \"f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f\": container with ID starting with f61e46946eaea8ee0c94aa96d38eb9ee73292feedc57af543024118bb2bb909f not found: ID does not exist" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.550366 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.563381 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.571804 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 08:57:57 crc kubenswrapper[4861]: E0129 08:57:57.572453 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a83392de-0747-452a-a609-8e4c63b0f13e" containerName="nova-cell1-conductor-conductor" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.572479 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a83392de-0747-452a-a609-8e4c63b0f13e" 
containerName="nova-cell1-conductor-conductor" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.572708 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a83392de-0747-452a-a609-8e4c63b0f13e" containerName="nova-cell1-conductor-conductor" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.573485 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.575996 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.601670 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.681823 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97ba244c-c0ff-41ab-8e3b-f566ecef325d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"97ba244c-c0ff-41ab-8e3b-f566ecef325d\") " pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.681897 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdvgx\" (UniqueName: \"kubernetes.io/projected/97ba244c-c0ff-41ab-8e3b-f566ecef325d-kube-api-access-tdvgx\") pod \"nova-cell1-conductor-0\" (UID: \"97ba244c-c0ff-41ab-8e3b-f566ecef325d\") " pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.682005 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97ba244c-c0ff-41ab-8e3b-f566ecef325d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"97ba244c-c0ff-41ab-8e3b-f566ecef325d\") " pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.784538 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97ba244c-c0ff-41ab-8e3b-f566ecef325d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"97ba244c-c0ff-41ab-8e3b-f566ecef325d\") " pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.784660 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdvgx\" (UniqueName: \"kubernetes.io/projected/97ba244c-c0ff-41ab-8e3b-f566ecef325d-kube-api-access-tdvgx\") pod \"nova-cell1-conductor-0\" (UID: \"97ba244c-c0ff-41ab-8e3b-f566ecef325d\") " pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.784684 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97ba244c-c0ff-41ab-8e3b-f566ecef325d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"97ba244c-c0ff-41ab-8e3b-f566ecef325d\") " pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.789161 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97ba244c-c0ff-41ab-8e3b-f566ecef325d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"97ba244c-c0ff-41ab-8e3b-f566ecef325d\") " pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.791485 4861 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97ba244c-c0ff-41ab-8e3b-f566ecef325d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"97ba244c-c0ff-41ab-8e3b-f566ecef325d\") " pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.803666 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdvgx\" (UniqueName: \"kubernetes.io/projected/97ba244c-c0ff-41ab-8e3b-f566ecef325d-kube-api-access-tdvgx\") pod \"nova-cell1-conductor-0\" (UID: \"97ba244c-c0ff-41ab-8e3b-f566ecef325d\") " pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:57 crc kubenswrapper[4861]: I0129 08:57:57.897650 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:58 crc kubenswrapper[4861]: I0129 08:57:58.424179 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 08:57:58 crc kubenswrapper[4861]: W0129 08:57:58.428148 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97ba244c_c0ff_41ab_8e3b_f566ecef325d.slice/crio-c0cf034a616b7fb457c58bfe6430eb64caa546a60e18128286a4c9fa2eba3287 WatchSource:0}: Error finding container c0cf034a616b7fb457c58bfe6430eb64caa546a60e18128286a4c9fa2eba3287: Status 404 returned error can't find the container with id c0cf034a616b7fb457c58bfe6430eb64caa546a60e18128286a4c9fa2eba3287 Jan 29 08:57:58 crc kubenswrapper[4861]: I0129 08:57:58.445264 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"97ba244c-c0ff-41ab-8e3b-f566ecef325d","Type":"ContainerStarted","Data":"c0cf034a616b7fb457c58bfe6430eb64caa546a60e18128286a4c9fa2eba3287"} Jan 29 08:57:58 crc kubenswrapper[4861]: I0129 08:57:58.941411 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.98:8775/\": dial tcp 10.217.1.98:8775: connect: connection refused" Jan 29 08:57:58 crc kubenswrapper[4861]: I0129 08:57:58.941465 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.98:8775/\": dial tcp 10.217.1.98:8775: connect: connection refused" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.143452 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a83392de-0747-452a-a609-8e4c63b0f13e" path="/var/lib/kubelet/pods/a83392de-0747-452a-a609-8e4c63b0f13e/volumes" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.370561 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.438901 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-combined-ca-bundle\") pod \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.438975 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-logs\") pod \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.439009 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlxqq\" (UniqueName: \"kubernetes.io/projected/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-kube-api-access-xlxqq\") pod \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.439284 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-nova-metadata-tls-certs\") pod \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.439488 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-config-data\") pod \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\" (UID: \"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310\") " Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.445209 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-logs" (OuterVolumeSpecName: "logs") pod "5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" (UID: "5c209a6d-c4ef-4a1c-ae11-8bf9087e3310"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.447976 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-kube-api-access-xlxqq" (OuterVolumeSpecName: "kube-api-access-xlxqq") pod "5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" (UID: "5c209a6d-c4ef-4a1c-ae11-8bf9087e3310"). InnerVolumeSpecName "kube-api-access-xlxqq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.484979 4861 generic.go:334] "Generic (PLEG): container finished" podID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerID="e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70" exitCode=0 Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.485063 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310","Type":"ContainerDied","Data":"e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70"} Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.485113 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c209a6d-c4ef-4a1c-ae11-8bf9087e3310","Type":"ContainerDied","Data":"87390450b98be59a9669e95a98f099bec586bebc21fbd6105907cbe28a915fb6"} Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.485157 4861 scope.go:117] "RemoveContainer" containerID="e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.485205 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.494523 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"97ba244c-c0ff-41ab-8e3b-f566ecef325d","Type":"ContainerStarted","Data":"cfd1e4593950ffe6fa9f08f34ddc4acbe4058f2b72aaa478ae11cd39c4d8ce0b"} Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.496197 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.510627 4861 generic.go:334] "Generic (PLEG): container finished" podID="14c47dd3-3866-4834-b719-5f8494904ea4" containerID="e894601e5fef0d614b3a9c749f0cd44610e778b1b1c3366627e68905994e0908" exitCode=0 Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.510720 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"14c47dd3-3866-4834-b719-5f8494904ea4","Type":"ContainerDied","Data":"e894601e5fef0d614b3a9c749f0cd44610e778b1b1c3366627e68905994e0908"} Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.520355 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-config-data" (OuterVolumeSpecName: "config-data") pod "5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" (UID: "5c209a6d-c4ef-4a1c-ae11-8bf9087e3310"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.527975 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.5279509669999998 podStartE2EDuration="2.527950967s" podCreationTimestamp="2026-01-29 08:57:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:57:59.521747812 +0000 UTC m=+8571.193242379" watchObservedRunningTime="2026-01-29 08:57:59.527950967 +0000 UTC m=+8571.199445534" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.536859 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" (UID: "5c209a6d-c4ef-4a1c-ae11-8bf9087e3310"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.542056 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.542117 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.542130 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.542139 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlxqq\" (UniqueName: \"kubernetes.io/projected/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-kube-api-access-xlxqq\") on node \"crc\" DevicePath \"\"" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.544147 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" (UID: "5c209a6d-c4ef-4a1c-ae11-8bf9087e3310"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.644741 4861 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.666660 4861 scope.go:117] "RemoveContainer" containerID="a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.707886 4861 scope.go:117] "RemoveContainer" containerID="e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70" Jan 29 08:57:59 crc kubenswrapper[4861]: E0129 08:57:59.708515 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70\": container with ID starting with e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70 not found: ID does not exist" containerID="e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.708560 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70"} err="failed to get container status \"e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70\": rpc error: code = NotFound desc = could not find container \"e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70\": container with ID starting with e202c06c7db5ba77aa2b52de51562d44a327b1675be47ec13cf9a4d454777c70 not found: ID does not exist" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.708588 4861 scope.go:117] "RemoveContainer" containerID="a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028" Jan 29 08:57:59 crc kubenswrapper[4861]: E0129 08:57:59.708916 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028\": container with ID starting with a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028 not found: ID does not exist" containerID="a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.708971 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028"} err="failed to get container status \"a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028\": rpc error: code = NotFound desc = could not find container \"a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028\": container with ID starting with a115e85cbd89de330975957ddc73a83bb3d29eee704bf1d0f6e9fb4362429028 not found: ID does not exist" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.900152 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.900682 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.915299 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.957159 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:57:59 crc kubenswrapper[4861]: E0129 08:57:59.957726 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14c47dd3-3866-4834-b719-5f8494904ea4" containerName="nova-api-log" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.957746 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="14c47dd3-3866-4834-b719-5f8494904ea4" containerName="nova-api-log" Jan 29 08:57:59 crc kubenswrapper[4861]: E0129 08:57:59.957779 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-metadata" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.957786 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-metadata" Jan 29 08:57:59 crc kubenswrapper[4861]: E0129 08:57:59.957813 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14c47dd3-3866-4834-b719-5f8494904ea4" containerName="nova-api-api" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.957821 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="14c47dd3-3866-4834-b719-5f8494904ea4" containerName="nova-api-api" Jan 29 08:57:59 crc kubenswrapper[4861]: E0129 08:57:59.957839 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-log" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.957846 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-log" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.958063 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-log" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.958106 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="14c47dd3-3866-4834-b719-5f8494904ea4" containerName="nova-api-log" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.958117 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="14c47dd3-3866-4834-b719-5f8494904ea4" containerName="nova-api-api" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.958129 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" containerName="nova-metadata-metadata" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.959564 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.972685 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 08:57:59 crc kubenswrapper[4861]: I0129 08:57:59.973043 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.017299 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.055405 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14c47dd3-3866-4834-b719-5f8494904ea4-logs\") pod \"14c47dd3-3866-4834-b719-5f8494904ea4\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.055521 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-internal-tls-certs\") pod \"14c47dd3-3866-4834-b719-5f8494904ea4\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.055676 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-public-tls-certs\") pod \"14c47dd3-3866-4834-b719-5f8494904ea4\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.055725 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-config-data\") pod \"14c47dd3-3866-4834-b719-5f8494904ea4\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.055755 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dp9f8\" (UniqueName: \"kubernetes.io/projected/14c47dd3-3866-4834-b719-5f8494904ea4-kube-api-access-dp9f8\") pod \"14c47dd3-3866-4834-b719-5f8494904ea4\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.055961 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-combined-ca-bundle\") pod \"14c47dd3-3866-4834-b719-5f8494904ea4\" (UID: \"14c47dd3-3866-4834-b719-5f8494904ea4\") " Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.063319 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e34de8ff-fa76-4858-99f8-b5c8365adf63-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.063870 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2tcb\" (UniqueName: \"kubernetes.io/projected/e34de8ff-fa76-4858-99f8-b5c8365adf63-kube-api-access-k2tcb\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.063921 4861 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e34de8ff-fa76-4858-99f8-b5c8365adf63-config-data\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.064537 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e34de8ff-fa76-4858-99f8-b5c8365adf63-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.064587 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e34de8ff-fa76-4858-99f8-b5c8365adf63-logs\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.087145 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14c47dd3-3866-4834-b719-5f8494904ea4-logs" (OuterVolumeSpecName: "logs") pod "14c47dd3-3866-4834-b719-5f8494904ea4" (UID: "14c47dd3-3866-4834-b719-5f8494904ea4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.150372 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14c47dd3-3866-4834-b719-5f8494904ea4-kube-api-access-dp9f8" (OuterVolumeSpecName: "kube-api-access-dp9f8") pod "14c47dd3-3866-4834-b719-5f8494904ea4" (UID: "14c47dd3-3866-4834-b719-5f8494904ea4"). InnerVolumeSpecName "kube-api-access-dp9f8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.166877 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e34de8ff-fa76-4858-99f8-b5c8365adf63-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.166937 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e34de8ff-fa76-4858-99f8-b5c8365adf63-logs\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.167060 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e34de8ff-fa76-4858-99f8-b5c8365adf63-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.167161 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2tcb\" (UniqueName: \"kubernetes.io/projected/e34de8ff-fa76-4858-99f8-b5c8365adf63-kube-api-access-k2tcb\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.167192 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e34de8ff-fa76-4858-99f8-b5c8365adf63-config-data\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.167307 4861 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14c47dd3-3866-4834-b719-5f8494904ea4-logs\") on node \"crc\" DevicePath \"\"" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.167326 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dp9f8\" (UniqueName: \"kubernetes.io/projected/14c47dd3-3866-4834-b719-5f8494904ea4-kube-api-access-dp9f8\") on node \"crc\" DevicePath \"\"" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.167599 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e34de8ff-fa76-4858-99f8-b5c8365adf63-logs\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.188757 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e34de8ff-fa76-4858-99f8-b5c8365adf63-config-data\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.199976 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e34de8ff-fa76-4858-99f8-b5c8365adf63-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.200458 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e34de8ff-fa76-4858-99f8-b5c8365adf63-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.211719 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2tcb\" (UniqueName: \"kubernetes.io/projected/e34de8ff-fa76-4858-99f8-b5c8365adf63-kube-api-access-k2tcb\") pod \"nova-metadata-0\" (UID: \"e34de8ff-fa76-4858-99f8-b5c8365adf63\") " pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.218567 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "14c47dd3-3866-4834-b719-5f8494904ea4" (UID: "14c47dd3-3866-4834-b719-5f8494904ea4"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.224312 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "14c47dd3-3866-4834-b719-5f8494904ea4" (UID: "14c47dd3-3866-4834-b719-5f8494904ea4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.239298 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-config-data" (OuterVolumeSpecName: "config-data") pod "14c47dd3-3866-4834-b719-5f8494904ea4" (UID: "14c47dd3-3866-4834-b719-5f8494904ea4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.270183 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "14c47dd3-3866-4834-b719-5f8494904ea4" (UID: "14c47dd3-3866-4834-b719-5f8494904ea4"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.271993 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.272032 4861 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.272045 4861 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.272057 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14c47dd3-3866-4834-b719-5f8494904ea4-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.291398 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.532275 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"14c47dd3-3866-4834-b719-5f8494904ea4","Type":"ContainerDied","Data":"b48117a68fccef866a6daa85deed4d6fa75cd0de31a894035cbbda9c45f121a5"} Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.532695 4861 scope.go:117] "RemoveContainer" containerID="e894601e5fef0d614b3a9c749f0cd44610e778b1b1c3366627e68905994e0908" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.532290 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.593673 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.608840 4861 scope.go:117] "RemoveContainer" containerID="8d4806e15389b2f60659d50931356fc913f5b09fbe06a4580139e0adf4ebff4c" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.622002 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.636761 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.639649 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.645611 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.645799 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.646045 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.649017 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.783956 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-internal-tls-certs\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.784157 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dvnk\" (UniqueName: \"kubernetes.io/projected/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-kube-api-access-9dvnk\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.784356 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-logs\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.784423 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-public-tls-certs\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.784716 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.784864 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-config-data\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.887100 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dvnk\" (UniqueName: \"kubernetes.io/projected/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-kube-api-access-9dvnk\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.887342 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-logs\") pod \"nova-api-0\" (UID: 
\"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.887461 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-public-tls-certs\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.887639 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.887756 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-config-data\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.887893 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-internal-tls-certs\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.892158 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.893289 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-logs\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.898891 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-public-tls-certs\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.898974 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-internal-tls-certs\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.898984 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.902119 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-config-data\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.917264 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dvnk\" (UniqueName: 
\"kubernetes.io/projected/39d2b1c9-7a75-415a-90ff-cd7a10fb7a20-kube-api-access-9dvnk\") pod \"nova-api-0\" (UID: \"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20\") " pod="openstack/nova-api-0" Jan 29 08:58:00 crc kubenswrapper[4861]: I0129 08:58:00.976699 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.132575 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14c47dd3-3866-4834-b719-5f8494904ea4" path="/var/lib/kubelet/pods/14c47dd3-3866-4834-b719-5f8494904ea4/volumes" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.134086 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c209a6d-c4ef-4a1c-ae11-8bf9087e3310" path="/var/lib/kubelet/pods/5c209a6d-c4ef-4a1c-ae11-8bf9087e3310/volumes" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.512448 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.556865 4861 generic.go:334] "Generic (PLEG): container finished" podID="6775a96e-88ab-471c-9abb-9a981572d243" containerID="4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501" exitCode=0 Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.556922 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6775a96e-88ab-471c-9abb-9a981572d243","Type":"ContainerDied","Data":"4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501"} Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.556952 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6775a96e-88ab-471c-9abb-9a981572d243","Type":"ContainerDied","Data":"599739d502bb78a0af0a216d714e3423bc5dc3168e733b69f5b56aef8e626963"} Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.556968 4861 scope.go:117] "RemoveContainer" containerID="4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.557086 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.572603 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e34de8ff-fa76-4858-99f8-b5c8365adf63","Type":"ContainerStarted","Data":"aead88292032514c30bfe0e45c6a8cda6b06a3351096bceaa5658e16ac71cf85"} Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.572676 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e34de8ff-fa76-4858-99f8-b5c8365adf63","Type":"ContainerStarted","Data":"4e430745c97f81dcb5e5bf9df1c076ef0716f2fa24cf00b6277ddd51b4052a02"} Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.609719 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwnhq\" (UniqueName: \"kubernetes.io/projected/6775a96e-88ab-471c-9abb-9a981572d243-kube-api-access-jwnhq\") pod \"6775a96e-88ab-471c-9abb-9a981572d243\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.610506 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-combined-ca-bundle\") pod \"6775a96e-88ab-471c-9abb-9a981572d243\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.610616 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-config-data\") pod \"6775a96e-88ab-471c-9abb-9a981572d243\" (UID: \"6775a96e-88ab-471c-9abb-9a981572d243\") " Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.620314 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6775a96e-88ab-471c-9abb-9a981572d243-kube-api-access-jwnhq" (OuterVolumeSpecName: "kube-api-access-jwnhq") pod "6775a96e-88ab-471c-9abb-9a981572d243" (UID: "6775a96e-88ab-471c-9abb-9a981572d243"). InnerVolumeSpecName "kube-api-access-jwnhq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.639226 4861 scope.go:117] "RemoveContainer" containerID="4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501" Jan 29 08:58:01 crc kubenswrapper[4861]: E0129 08:58:01.642670 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501\": container with ID starting with 4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501 not found: ID does not exist" containerID="4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.642717 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501"} err="failed to get container status \"4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501\": rpc error: code = NotFound desc = could not find container \"4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501\": container with ID starting with 4ad53c915076729aae500a7802f92724814eaee34de702dea41f54b84a922501 not found: ID does not exist" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.674541 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.694083 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-config-data" (OuterVolumeSpecName: "config-data") pod "6775a96e-88ab-471c-9abb-9a981572d243" (UID: "6775a96e-88ab-471c-9abb-9a981572d243"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.694269 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6775a96e-88ab-471c-9abb-9a981572d243" (UID: "6775a96e-88ab-471c-9abb-9a981572d243"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.714476 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.714510 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6775a96e-88ab-471c-9abb-9a981572d243-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.714520 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwnhq\" (UniqueName: \"kubernetes.io/projected/6775a96e-88ab-471c-9abb-9a981572d243-kube-api-access-jwnhq\") on node \"crc\" DevicePath \"\"" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.898514 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.933238 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.945479 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 08:58:01 crc kubenswrapper[4861]: E0129 08:58:01.946084 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6775a96e-88ab-471c-9abb-9a981572d243" containerName="nova-scheduler-scheduler" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.946103 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="6775a96e-88ab-471c-9abb-9a981572d243" containerName="nova-scheduler-scheduler" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.946327 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="6775a96e-88ab-471c-9abb-9a981572d243" containerName="nova-scheduler-scheduler" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.947011 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.949507 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 08:58:01 crc kubenswrapper[4861]: I0129 08:58:01.956207 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.123864 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgs7r\" (UniqueName: \"kubernetes.io/projected/ba7324f0-6a1f-402b-a403-445482d46dc4-kube-api-access-tgs7r\") pod \"nova-scheduler-0\" (UID: \"ba7324f0-6a1f-402b-a403-445482d46dc4\") " pod="openstack/nova-scheduler-0" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.123940 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba7324f0-6a1f-402b-a403-445482d46dc4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ba7324f0-6a1f-402b-a403-445482d46dc4\") " pod="openstack/nova-scheduler-0" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.123987 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba7324f0-6a1f-402b-a403-445482d46dc4-config-data\") pod \"nova-scheduler-0\" (UID: \"ba7324f0-6a1f-402b-a403-445482d46dc4\") " pod="openstack/nova-scheduler-0" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.228828 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgs7r\" (UniqueName: \"kubernetes.io/projected/ba7324f0-6a1f-402b-a403-445482d46dc4-kube-api-access-tgs7r\") pod \"nova-scheduler-0\" (UID: \"ba7324f0-6a1f-402b-a403-445482d46dc4\") " pod="openstack/nova-scheduler-0" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.228903 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba7324f0-6a1f-402b-a403-445482d46dc4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ba7324f0-6a1f-402b-a403-445482d46dc4\") " pod="openstack/nova-scheduler-0" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.229033 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba7324f0-6a1f-402b-a403-445482d46dc4-config-data\") pod \"nova-scheduler-0\" (UID: \"ba7324f0-6a1f-402b-a403-445482d46dc4\") " pod="openstack/nova-scheduler-0" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.234605 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba7324f0-6a1f-402b-a403-445482d46dc4-config-data\") pod \"nova-scheduler-0\" (UID: \"ba7324f0-6a1f-402b-a403-445482d46dc4\") " pod="openstack/nova-scheduler-0" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.234645 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba7324f0-6a1f-402b-a403-445482d46dc4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ba7324f0-6a1f-402b-a403-445482d46dc4\") " pod="openstack/nova-scheduler-0" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.247559 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgs7r\" (UniqueName: 
\"kubernetes.io/projected/ba7324f0-6a1f-402b-a403-445482d46dc4-kube-api-access-tgs7r\") pod \"nova-scheduler-0\" (UID: \"ba7324f0-6a1f-402b-a403-445482d46dc4\") " pod="openstack/nova-scheduler-0" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.268521 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.591016 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e34de8ff-fa76-4858-99f8-b5c8365adf63","Type":"ContainerStarted","Data":"533f5c62abca9e3ad6e064179037b1adb96e4135faeae2a66ee2fc0e0c8b6d62"} Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.595244 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20","Type":"ContainerStarted","Data":"a404f4511247297f4cc03ac8a28f86d60493e7870d3a53be0473f0503dd49214"} Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.595285 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20","Type":"ContainerStarted","Data":"1ab3dec36f99197d3d525c7d5c04458d056fe600a696ee9afb841800ab4c4530"} Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.595298 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"39d2b1c9-7a75-415a-90ff-cd7a10fb7a20","Type":"ContainerStarted","Data":"27437b5c6be92dc362ce42f6c246a666d2937640b0487625ba63244a5c665eb7"} Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.622323 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.622297245 podStartE2EDuration="3.622297245s" podCreationTimestamp="2026-01-29 08:57:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:58:02.608579662 +0000 UTC m=+8574.280074229" watchObservedRunningTime="2026-01-29 08:58:02.622297245 +0000 UTC m=+8574.293791802" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.650684 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.650662355 podStartE2EDuration="2.650662355s" podCreationTimestamp="2026-01-29 08:58:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:58:02.633912783 +0000 UTC m=+8574.305407340" watchObservedRunningTime="2026-01-29 08:58:02.650662355 +0000 UTC m=+8574.322156922" Jan 29 08:58:02 crc kubenswrapper[4861]: I0129 08:58:02.861782 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 08:58:02 crc kubenswrapper[4861]: W0129 08:58:02.876991 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba7324f0_6a1f_402b_a403_445482d46dc4.slice/crio-dff55d38324680e0ab324b3ca19fd918f9aeda5b0bde0cbc35ba35f1a3114ef5 WatchSource:0}: Error finding container dff55d38324680e0ab324b3ca19fd918f9aeda5b0bde0cbc35ba35f1a3114ef5: Status 404 returned error can't find the container with id dff55d38324680e0ab324b3ca19fd918f9aeda5b0bde0cbc35ba35f1a3114ef5 Jan 29 08:58:03 crc kubenswrapper[4861]: I0129 08:58:03.127900 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6775a96e-88ab-471c-9abb-9a981572d243" 
path="/var/lib/kubelet/pods/6775a96e-88ab-471c-9abb-9a981572d243/volumes" Jan 29 08:58:03 crc kubenswrapper[4861]: I0129 08:58:03.608699 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ba7324f0-6a1f-402b-a403-445482d46dc4","Type":"ContainerStarted","Data":"ef7d4ceb53b4ba91bf055cf36a0fcd829b897ddee257e9c54150b40c04c58427"} Jan 29 08:58:03 crc kubenswrapper[4861]: I0129 08:58:03.608796 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ba7324f0-6a1f-402b-a403-445482d46dc4","Type":"ContainerStarted","Data":"dff55d38324680e0ab324b3ca19fd918f9aeda5b0bde0cbc35ba35f1a3114ef5"} Jan 29 08:58:03 crc kubenswrapper[4861]: I0129 08:58:03.631846 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.631823325 podStartE2EDuration="2.631823325s" podCreationTimestamp="2026-01-29 08:58:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 08:58:03.626040893 +0000 UTC m=+8575.297535480" watchObservedRunningTime="2026-01-29 08:58:03.631823325 +0000 UTC m=+8575.303317882" Jan 29 08:58:05 crc kubenswrapper[4861]: I0129 08:58:05.292247 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 08:58:05 crc kubenswrapper[4861]: I0129 08:58:05.292573 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 08:58:05 crc kubenswrapper[4861]: I0129 08:58:05.872831 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 29 08:58:07 crc kubenswrapper[4861]: I0129 08:58:07.278454 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 08:58:07 crc kubenswrapper[4861]: I0129 08:58:07.951380 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 29 08:58:09 crc kubenswrapper[4861]: I0129 08:58:09.122428 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 08:58:09 crc kubenswrapper[4861]: E0129 08:58:09.123009 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:58:10 crc kubenswrapper[4861]: I0129 08:58:10.292569 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 08:58:10 crc kubenswrapper[4861]: I0129 08:58:10.292828 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 08:58:10 crc kubenswrapper[4861]: I0129 08:58:10.977094 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 08:58:10 crc kubenswrapper[4861]: I0129 08:58:10.978375 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 08:58:11 crc kubenswrapper[4861]: I0129 08:58:11.308390 4861 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/nova-metadata-0" podUID="e34de8ff-fa76-4858-99f8-b5c8365adf63" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 08:58:11 crc kubenswrapper[4861]: I0129 08:58:11.308394 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="e34de8ff-fa76-4858-99f8-b5c8365adf63" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 08:58:11 crc kubenswrapper[4861]: I0129 08:58:11.985363 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="39d2b1c9-7a75-415a-90ff-cd7a10fb7a20" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 08:58:11 crc kubenswrapper[4861]: I0129 08:58:11.990332 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="39d2b1c9-7a75-415a-90ff-cd7a10fb7a20" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 08:58:12 crc kubenswrapper[4861]: I0129 08:58:12.268950 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 08:58:12 crc kubenswrapper[4861]: I0129 08:58:12.304823 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 08:58:12 crc kubenswrapper[4861]: I0129 08:58:12.724336 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 08:58:20 crc kubenswrapper[4861]: I0129 08:58:20.297458 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 08:58:20 crc kubenswrapper[4861]: I0129 08:58:20.298251 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 08:58:20 crc kubenswrapper[4861]: I0129 08:58:20.303652 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 08:58:20 crc kubenswrapper[4861]: I0129 08:58:20.304505 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 08:58:20 crc kubenswrapper[4861]: I0129 08:58:20.984934 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 08:58:20 crc kubenswrapper[4861]: I0129 08:58:20.985032 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 08:58:20 crc kubenswrapper[4861]: I0129 08:58:20.985463 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 08:58:20 crc kubenswrapper[4861]: I0129 08:58:20.985511 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 08:58:20 crc kubenswrapper[4861]: I0129 08:58:20.991386 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 08:58:20 crc kubenswrapper[4861]: I0129 08:58:20.991435 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 08:58:22 crc 
kubenswrapper[4861]: I0129 08:58:22.163489 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"]
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.165651 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.170310 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.172362 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.172363 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.173594 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.174031 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.174302 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-f4vld"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.176591 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.179013 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"]
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.302796 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.302865 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.302947 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pc6w\" (UniqueName: \"kubernetes.io/projected/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-kube-api-access-4pc6w\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.303210 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.303320 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.303371 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.325690 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.325738 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.325853 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.428217 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.428291 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.428332 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.428398 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.428423 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.428475 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.428532 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.428567 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.428636 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pc6w\" (UniqueName: \"kubernetes.io/projected/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-kube-api-access-4pc6w\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.429993 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.434374 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.434457 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-ssh-key-openstack-cell1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.434772 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.435342 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.436671 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.436814 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.447176 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pc6w\" (UniqueName: \"kubernetes.io/projected/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-kube-api-access-4pc6w\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.449008 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"
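The block above is the kubelet's standard three-phase volume flow for a new pod: reconciler_common.go:245 confirms each volume is attached (VerifyControllerAttachedVolume), reconciler_common.go:218 hands the mount to the plugin, and operation_generator.go:637 reports MountVolume.SetUp succeeded once the secret/configmap/projected plugin has materialized the files. A minimal Go sketch of that state machine — illustrative only, not the kubelet's actual code; all names below are invented:

    // Illustrative only — not kubelet source. Names and types are invented.
    package main

    import "fmt"

    type volume struct{ name, plugin string }

    func reconcile(podUID string, vols []volume) {
    	for _, v := range vols {
    		// Phase 1 (reconciler_common.go:245): volume is attached to the node.
    		fmt.Printf("VerifyControllerAttachedVolume started for volume %q pod %q\n", v.name, podUID)
    		// Phase 2 (reconciler_common.go:218): hand the mount to the plugin.
    		fmt.Printf("MountVolume started for volume %q (%s)\n", v.name, v.plugin)
    		// Phase 3 (operation_generator.go:637): the plugin's SetUp wrote the files.
    		fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", v.name)
    	}
    }

    func main() {
    	reconcile("5a7fa956-9445-4483-b3c5-9d0548f8f2b4", []volume{
    		{"nova-cell1-combined-ca-bundle", "kubernetes.io/secret"},
    		{"nova-cells-global-config-0", "kubernetes.io/configmap"},
    	})
    }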
\"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds" Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.449008 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds" Jan 29 08:58:22 crc kubenswrapper[4861]: I0129 08:58:22.488710 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds" Jan 29 08:58:23 crc kubenswrapper[4861]: W0129 08:58:23.073323 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a7fa956_9445_4483_b3c5_9d0548f8f2b4.slice/crio-282e713ef961651737cb3c91c813fc0f0e7b6137a18bda975751c701fb4e7f69 WatchSource:0}: Error finding container 282e713ef961651737cb3c91c813fc0f0e7b6137a18bda975751c701fb4e7f69: Status 404 returned error can't find the container with id 282e713ef961651737cb3c91c813fc0f0e7b6137a18bda975751c701fb4e7f69 Jan 29 08:58:23 crc kubenswrapper[4861]: I0129 08:58:23.074961 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds"] Jan 29 08:58:23 crc kubenswrapper[4861]: I0129 08:58:23.116831 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 08:58:23 crc kubenswrapper[4861]: E0129 08:58:23.117174 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 08:58:23 crc kubenswrapper[4861]: I0129 08:58:23.837842 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds" event={"ID":"5a7fa956-9445-4483-b3c5-9d0548f8f2b4","Type":"ContainerStarted","Data":"b9ac14e303fbf6f5605bc55e6b948d173cf8d5acb179b3fc0b4dbbe6cec486c6"} Jan 29 08:58:23 crc kubenswrapper[4861]: I0129 08:58:23.839161 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds" event={"ID":"5a7fa956-9445-4483-b3c5-9d0548f8f2b4","Type":"ContainerStarted","Data":"282e713ef961651737cb3c91c813fc0f0e7b6137a18bda975751c701fb4e7f69"} Jan 29 08:58:24 crc kubenswrapper[4861]: I0129 08:58:24.870490 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds" podStartSLOduration=2.441367234 podStartE2EDuration="2.870470013s" podCreationTimestamp="2026-01-29 08:58:22 +0000 UTC" firstStartedPulling="2026-01-29 08:58:23.076424425 +0000 UTC m=+8594.747918982" lastFinishedPulling="2026-01-29 08:58:23.505527204 +0000 UTC m=+8595.177021761" observedRunningTime="2026-01-29 08:58:24.864205867 
Jan 29 08:58:37 crc kubenswrapper[4861]: I0129 08:58:37.116738 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"
Jan 29 08:58:37 crc kubenswrapper[4861]: E0129 08:58:37.118399 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:58:48 crc kubenswrapper[4861]: I0129 08:58:48.116506 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"
Jan 29 08:58:48 crc kubenswrapper[4861]: E0129 08:58:48.117314 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:59:01 crc kubenswrapper[4861]: I0129 08:59:01.117208 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"
Jan 29 08:59:01 crc kubenswrapper[4861]: E0129 08:59:01.117950 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:59:12 crc kubenswrapper[4861]: I0129 08:59:12.117464 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"
Jan 29 08:59:12 crc kubenswrapper[4861]: E0129 08:59:12.118451 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:59:24 crc kubenswrapper[4861]: I0129 08:59:24.117060 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"
Jan 29 08:59:24 crc kubenswrapper[4861]: E0129 08:59:24.118041 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:59:37 crc kubenswrapper[4861]: I0129 08:59:37.117212 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"
Jan 29 08:59:37 crc kubenswrapper[4861]: E0129 08:59:37.118128 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 08:59:50 crc kubenswrapper[4861]: I0129 08:59:50.116540 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c"
Jan 29 08:59:50 crc kubenswrapper[4861]: E0129 08:59:50.117381 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
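The stretch above is the same RemoveContainer/CrashLoopBackOff pair repeating every 11-13 seconds: each sync-loop pass retries the machine-config-daemon container and is rejected because the restart backoff window is still open. The kubelet's container backoff is a capped exponential; a sketch with the usual defaults (10s base doubling to a 5m ceiling, hence "back-off 5m0s" once the container has crashed enough times) assumed rather than read from this cluster:

    // Capped exponential backoff, assuming the kubelet defaults (10s base, 5m cap).
    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	delay, maxDelay := 10*time.Second, 5*time.Minute
    	for crash := 1; crash <= 8; crash++ {
    		fmt.Printf("crash %d -> next restart allowed in %v\n", crash, delay)
    		if delay *= 2; delay > maxDelay {
    			delay = maxDelay // from here on every retry logs "back-off 5m0s"
    		}
    	}
    }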
"operationExecutor.MountVolume started for volume \"kube-api-access-kml26\" (UniqueName: \"kubernetes.io/projected/acb8a0a4-b178-47a2-9d96-0ecf94abf560-kube-api-access-kml26\") pod \"community-operators-vdprb\" (UID: \"acb8a0a4-b178-47a2-9d96-0ecf94abf560\") " pod="openshift-marketplace/community-operators-vdprb" Jan 29 08:59:56 crc kubenswrapper[4861]: I0129 08:59:56.357432 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acb8a0a4-b178-47a2-9d96-0ecf94abf560-utilities\") pod \"community-operators-vdprb\" (UID: \"acb8a0a4-b178-47a2-9d96-0ecf94abf560\") " pod="openshift-marketplace/community-operators-vdprb" Jan 29 08:59:56 crc kubenswrapper[4861]: I0129 08:59:56.357841 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acb8a0a4-b178-47a2-9d96-0ecf94abf560-catalog-content\") pod \"community-operators-vdprb\" (UID: \"acb8a0a4-b178-47a2-9d96-0ecf94abf560\") " pod="openshift-marketplace/community-operators-vdprb" Jan 29 08:59:56 crc kubenswrapper[4861]: I0129 08:59:56.357955 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acb8a0a4-b178-47a2-9d96-0ecf94abf560-utilities\") pod \"community-operators-vdprb\" (UID: \"acb8a0a4-b178-47a2-9d96-0ecf94abf560\") " pod="openshift-marketplace/community-operators-vdprb" Jan 29 08:59:56 crc kubenswrapper[4861]: I0129 08:59:56.684034 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kml26\" (UniqueName: \"kubernetes.io/projected/acb8a0a4-b178-47a2-9d96-0ecf94abf560-kube-api-access-kml26\") pod \"community-operators-vdprb\" (UID: \"acb8a0a4-b178-47a2-9d96-0ecf94abf560\") " pod="openshift-marketplace/community-operators-vdprb" Jan 29 08:59:56 crc kubenswrapper[4861]: I0129 08:59:56.984359 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vdprb" Jan 29 08:59:57 crc kubenswrapper[4861]: I0129 08:59:57.461313 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vdprb"] Jan 29 08:59:57 crc kubenswrapper[4861]: I0129 08:59:57.712493 4861 generic.go:334] "Generic (PLEG): container finished" podID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" containerID="8ec92feefd3fc73f29f3e3f242db1df94bd0d0726346b5a7df9c36666e9c24b7" exitCode=0 Jan 29 08:59:57 crc kubenswrapper[4861]: I0129 08:59:57.712537 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdprb" event={"ID":"acb8a0a4-b178-47a2-9d96-0ecf94abf560","Type":"ContainerDied","Data":"8ec92feefd3fc73f29f3e3f242db1df94bd0d0726346b5a7df9c36666e9c24b7"} Jan 29 08:59:57 crc kubenswrapper[4861]: I0129 08:59:57.712562 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdprb" event={"ID":"acb8a0a4-b178-47a2-9d96-0ecf94abf560","Type":"ContainerStarted","Data":"f58af663365fb75df85b742c5b7213e5fbd2e2d11b4156115efcc723589d7b18"} Jan 29 08:59:58 crc kubenswrapper[4861]: I0129 08:59:58.727767 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdprb" event={"ID":"acb8a0a4-b178-47a2-9d96-0ecf94abf560","Type":"ContainerStarted","Data":"807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df"} Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.152351 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f"] Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.154312 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.156261 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.157303 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.172347 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f"] Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.266928 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9650c53-f61e-4634-a388-0345ddd08c6a-config-volume\") pod \"collect-profiles-29494620-qt88f\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.266978 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xxmc\" (UniqueName: \"kubernetes.io/projected/b9650c53-f61e-4634-a388-0345ddd08c6a-kube-api-access-2xxmc\") pod \"collect-profiles-29494620-qt88f\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.267008 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9650c53-f61e-4634-a388-0345ddd08c6a-secret-volume\") pod \"collect-profiles-29494620-qt88f\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.369751 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9650c53-f61e-4634-a388-0345ddd08c6a-config-volume\") pod \"collect-profiles-29494620-qt88f\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.369806 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xxmc\" (UniqueName: \"kubernetes.io/projected/b9650c53-f61e-4634-a388-0345ddd08c6a-kube-api-access-2xxmc\") pod \"collect-profiles-29494620-qt88f\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.369836 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9650c53-f61e-4634-a388-0345ddd08c6a-secret-volume\") pod \"collect-profiles-29494620-qt88f\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.371190 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9650c53-f61e-4634-a388-0345ddd08c6a-config-volume\") pod 
\"collect-profiles-29494620-qt88f\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.377582 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9650c53-f61e-4634-a388-0345ddd08c6a-secret-volume\") pod \"collect-profiles-29494620-qt88f\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.388900 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xxmc\" (UniqueName: \"kubernetes.io/projected/b9650c53-f61e-4634-a388-0345ddd08c6a-kube-api-access-2xxmc\") pod \"collect-profiles-29494620-qt88f\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.482745 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.755570 4861 generic.go:334] "Generic (PLEG): container finished" podID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" containerID="807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df" exitCode=0 Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.755663 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdprb" event={"ID":"acb8a0a4-b178-47a2-9d96-0ecf94abf560","Type":"ContainerDied","Data":"807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df"} Jan 29 09:00:00 crc kubenswrapper[4861]: I0129 09:00:00.935388 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f"] Jan 29 09:00:00 crc kubenswrapper[4861]: W0129 09:00:00.937397 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9650c53_f61e_4634_a388_0345ddd08c6a.slice/crio-2441a67bc9a1c30201642a9387ce983263cd046acde06b932727c5ae1da85ef2 WatchSource:0}: Error finding container 2441a67bc9a1c30201642a9387ce983263cd046acde06b932727c5ae1da85ef2: Status 404 returned error can't find the container with id 2441a67bc9a1c30201642a9387ce983263cd046acde06b932727c5ae1da85ef2 Jan 29 09:00:01 crc kubenswrapper[4861]: I0129 09:00:01.116703 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 09:00:01 crc kubenswrapper[4861]: E0129 09:00:01.117582 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:00:01 crc kubenswrapper[4861]: I0129 09:00:01.767506 4861 generic.go:334] "Generic (PLEG): container finished" podID="b9650c53-f61e-4634-a388-0345ddd08c6a" containerID="e9587dd8d459b7ac2056edbd2f49d4eb5bfa31b19fa4813dca60cdc684ff1474" exitCode=0 Jan 29 09:00:01 crc kubenswrapper[4861]: I0129 09:00:01.767557 4861 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" event={"ID":"b9650c53-f61e-4634-a388-0345ddd08c6a","Type":"ContainerDied","Data":"e9587dd8d459b7ac2056edbd2f49d4eb5bfa31b19fa4813dca60cdc684ff1474"} Jan 29 09:00:01 crc kubenswrapper[4861]: I0129 09:00:01.767911 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" event={"ID":"b9650c53-f61e-4634-a388-0345ddd08c6a","Type":"ContainerStarted","Data":"2441a67bc9a1c30201642a9387ce983263cd046acde06b932727c5ae1da85ef2"} Jan 29 09:00:01 crc kubenswrapper[4861]: I0129 09:00:01.772625 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdprb" event={"ID":"acb8a0a4-b178-47a2-9d96-0ecf94abf560","Type":"ContainerStarted","Data":"ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb"} Jan 29 09:00:01 crc kubenswrapper[4861]: I0129 09:00:01.815900 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vdprb" podStartSLOduration=2.282155253 podStartE2EDuration="5.815877512s" podCreationTimestamp="2026-01-29 08:59:56 +0000 UTC" firstStartedPulling="2026-01-29 08:59:57.715693161 +0000 UTC m=+8689.387187718" lastFinishedPulling="2026-01-29 09:00:01.24941542 +0000 UTC m=+8692.920909977" observedRunningTime="2026-01-29 09:00:01.809205796 +0000 UTC m=+8693.480700373" watchObservedRunningTime="2026-01-29 09:00:01.815877512 +0000 UTC m=+8693.487372069" Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.167284 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.255098 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9650c53-f61e-4634-a388-0345ddd08c6a-config-volume\") pod \"b9650c53-f61e-4634-a388-0345ddd08c6a\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.255359 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9650c53-f61e-4634-a388-0345ddd08c6a-secret-volume\") pod \"b9650c53-f61e-4634-a388-0345ddd08c6a\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.255424 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xxmc\" (UniqueName: \"kubernetes.io/projected/b9650c53-f61e-4634-a388-0345ddd08c6a-kube-api-access-2xxmc\") pod \"b9650c53-f61e-4634-a388-0345ddd08c6a\" (UID: \"b9650c53-f61e-4634-a388-0345ddd08c6a\") " Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.256016 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9650c53-f61e-4634-a388-0345ddd08c6a-config-volume" (OuterVolumeSpecName: "config-volume") pod "b9650c53-f61e-4634-a388-0345ddd08c6a" (UID: "b9650c53-f61e-4634-a388-0345ddd08c6a"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.262353 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9650c53-f61e-4634-a388-0345ddd08c6a-kube-api-access-2xxmc" (OuterVolumeSpecName: "kube-api-access-2xxmc") pod "b9650c53-f61e-4634-a388-0345ddd08c6a" (UID: "b9650c53-f61e-4634-a388-0345ddd08c6a"). InnerVolumeSpecName "kube-api-access-2xxmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.271760 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9650c53-f61e-4634-a388-0345ddd08c6a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b9650c53-f61e-4634-a388-0345ddd08c6a" (UID: "b9650c53-f61e-4634-a388-0345ddd08c6a"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.357737 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xxmc\" (UniqueName: \"kubernetes.io/projected/b9650c53-f61e-4634-a388-0345ddd08c6a-kube-api-access-2xxmc\") on node \"crc\" DevicePath \"\"" Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.357770 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9650c53-f61e-4634-a388-0345ddd08c6a-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.357779 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9650c53-f61e-4634-a388-0345ddd08c6a-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.790196 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" event={"ID":"b9650c53-f61e-4634-a388-0345ddd08c6a","Type":"ContainerDied","Data":"2441a67bc9a1c30201642a9387ce983263cd046acde06b932727c5ae1da85ef2"} Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.790239 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2441a67bc9a1c30201642a9387ce983263cd046acde06b932727c5ae1da85ef2" Jan 29 09:00:03 crc kubenswrapper[4861]: I0129 09:00:03.790256 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494620-qt88f" Jan 29 09:00:04 crc kubenswrapper[4861]: I0129 09:00:04.288865 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w"] Jan 29 09:00:04 crc kubenswrapper[4861]: I0129 09:00:04.300133 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494575-ftg8w"] Jan 29 09:00:05 crc kubenswrapper[4861]: I0129 09:00:05.130678 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58031ad0-9a82-4948-a2dd-cf318360285f" path="/var/lib/kubelet/pods/58031ad0-9a82-4948-a2dd-cf318360285f/volumes" Jan 29 09:00:06 crc kubenswrapper[4861]: I0129 09:00:06.984666 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vdprb" Jan 29 09:00:06 crc kubenswrapper[4861]: I0129 09:00:06.985003 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vdprb" Jan 29 09:00:07 crc kubenswrapper[4861]: I0129 09:00:07.326864 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vdprb" Jan 29 09:00:07 crc kubenswrapper[4861]: I0129 09:00:07.886826 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vdprb" Jan 29 09:00:07 crc kubenswrapper[4861]: I0129 09:00:07.944690 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vdprb"] Jan 29 09:00:09 crc kubenswrapper[4861]: I0129 09:00:09.843275 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vdprb" podUID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" containerName="registry-server" containerID="cri-o://ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb" gracePeriod=2 Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.320561 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vdprb" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.543131 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kml26\" (UniqueName: \"kubernetes.io/projected/acb8a0a4-b178-47a2-9d96-0ecf94abf560-kube-api-access-kml26\") pod \"acb8a0a4-b178-47a2-9d96-0ecf94abf560\" (UID: \"acb8a0a4-b178-47a2-9d96-0ecf94abf560\") " Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.543262 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acb8a0a4-b178-47a2-9d96-0ecf94abf560-utilities\") pod \"acb8a0a4-b178-47a2-9d96-0ecf94abf560\" (UID: \"acb8a0a4-b178-47a2-9d96-0ecf94abf560\") " Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.543370 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acb8a0a4-b178-47a2-9d96-0ecf94abf560-catalog-content\") pod \"acb8a0a4-b178-47a2-9d96-0ecf94abf560\" (UID: \"acb8a0a4-b178-47a2-9d96-0ecf94abf560\") " Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.545309 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acb8a0a4-b178-47a2-9d96-0ecf94abf560-utilities" (OuterVolumeSpecName: "utilities") pod "acb8a0a4-b178-47a2-9d96-0ecf94abf560" (UID: "acb8a0a4-b178-47a2-9d96-0ecf94abf560"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.569359 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acb8a0a4-b178-47a2-9d96-0ecf94abf560-kube-api-access-kml26" (OuterVolumeSpecName: "kube-api-access-kml26") pod "acb8a0a4-b178-47a2-9d96-0ecf94abf560" (UID: "acb8a0a4-b178-47a2-9d96-0ecf94abf560"). InnerVolumeSpecName "kube-api-access-kml26". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.614828 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acb8a0a4-b178-47a2-9d96-0ecf94abf560-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "acb8a0a4-b178-47a2-9d96-0ecf94abf560" (UID: "acb8a0a4-b178-47a2-9d96-0ecf94abf560"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.646387 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acb8a0a4-b178-47a2-9d96-0ecf94abf560-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.646419 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acb8a0a4-b178-47a2-9d96-0ecf94abf560-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.646434 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kml26\" (UniqueName: \"kubernetes.io/projected/acb8a0a4-b178-47a2-9d96-0ecf94abf560-kube-api-access-kml26\") on node \"crc\" DevicePath \"\"" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.854362 4861 generic.go:334] "Generic (PLEG): container finished" podID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" containerID="ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb" exitCode=0 Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.854409 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdprb" event={"ID":"acb8a0a4-b178-47a2-9d96-0ecf94abf560","Type":"ContainerDied","Data":"ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb"} Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.854463 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vdprb" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.854479 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdprb" event={"ID":"acb8a0a4-b178-47a2-9d96-0ecf94abf560","Type":"ContainerDied","Data":"f58af663365fb75df85b742c5b7213e5fbd2e2d11b4156115efcc723589d7b18"} Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.854508 4861 scope.go:117] "RemoveContainer" containerID="ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.880390 4861 scope.go:117] "RemoveContainer" containerID="807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.889625 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vdprb"] Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.900061 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vdprb"] Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.910615 4861 scope.go:117] "RemoveContainer" containerID="8ec92feefd3fc73f29f3e3f242db1df94bd0d0726346b5a7df9c36666e9c24b7" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.956252 4861 scope.go:117] "RemoveContainer" containerID="ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb" Jan 29 09:00:10 crc kubenswrapper[4861]: E0129 09:00:10.956817 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb\": container with ID starting with ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb not found: ID does not exist" containerID="ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.956869 
4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb"} err="failed to get container status \"ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb\": rpc error: code = NotFound desc = could not find container \"ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb\": container with ID starting with ff2fdef265a437a7caadce0e5490132ead578856cc4fd0a427cda45ed68f4ecb not found: ID does not exist" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.956896 4861 scope.go:117] "RemoveContainer" containerID="807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df" Jan 29 09:00:10 crc kubenswrapper[4861]: E0129 09:00:10.958425 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df\": container with ID starting with 807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df not found: ID does not exist" containerID="807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.958452 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df"} err="failed to get container status \"807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df\": rpc error: code = NotFound desc = could not find container \"807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df\": container with ID starting with 807002c0c7c2544af83a6912c15784bf761c07f500c2c0bef9206a0a46d169df not found: ID does not exist" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.958466 4861 scope.go:117] "RemoveContainer" containerID="8ec92feefd3fc73f29f3e3f242db1df94bd0d0726346b5a7df9c36666e9c24b7" Jan 29 09:00:10 crc kubenswrapper[4861]: E0129 09:00:10.958907 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ec92feefd3fc73f29f3e3f242db1df94bd0d0726346b5a7df9c36666e9c24b7\": container with ID starting with 8ec92feefd3fc73f29f3e3f242db1df94bd0d0726346b5a7df9c36666e9c24b7 not found: ID does not exist" containerID="8ec92feefd3fc73f29f3e3f242db1df94bd0d0726346b5a7df9c36666e9c24b7" Jan 29 09:00:10 crc kubenswrapper[4861]: I0129 09:00:10.958956 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ec92feefd3fc73f29f3e3f242db1df94bd0d0726346b5a7df9c36666e9c24b7"} err="failed to get container status \"8ec92feefd3fc73f29f3e3f242db1df94bd0d0726346b5a7df9c36666e9c24b7\": rpc error: code = NotFound desc = could not find container \"8ec92feefd3fc73f29f3e3f242db1df94bd0d0726346b5a7df9c36666e9c24b7\": container with ID starting with 8ec92feefd3fc73f29f3e3f242db1df94bd0d0726346b5a7df9c36666e9c24b7 not found: ID does not exist" Jan 29 09:00:11 crc kubenswrapper[4861]: I0129 09:00:11.129556 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" path="/var/lib/kubelet/pods/acb8a0a4-b178-47a2-9d96-0ecf94abf560/volumes" Jan 29 09:00:12 crc kubenswrapper[4861]: I0129 09:00:12.118139 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 09:00:12 crc kubenswrapper[4861]: E0129 09:00:12.119385 4861 pod_workers.go:1301] "Error syncing pod, skipping" 
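The three ContainerStatus/DeleteContainer NotFound errors above are the benign double-delete race: by the time the second RemoveContainer pass runs, CRI-O has already removed the containers. The usual handling is to treat NotFound as success so deletion stays idempotent — a sketch using gRPC status codes, matching the "rpc error: code = NotFound" text in the log:

    // Treat NotFound as success so container deletion stays idempotent.
    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    func removeContainer(id string, del func(string) error) error {
    	if err := del(id); err != nil {
    		if status.Code(err) == codes.NotFound {
    			return nil // already gone — nothing left to do
    		}
    		return fmt.Errorf("failed to get container status %q: %w", id, err)
    	}
    	return nil
    }

    func main() {
    	alreadyGone := func(id string) error {
    		return status.Errorf(codes.NotFound, "could not find container %q", id)
    	}
    	fmt.Println(removeContainer("ff2fdef265a4...", alreadyGone)) // <nil>
    }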
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:00:25 crc kubenswrapper[4861]: I0129 09:00:25.116477 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 09:00:25 crc kubenswrapper[4861]: E0129 09:00:25.118920 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:00:33 crc kubenswrapper[4861]: I0129 09:00:33.577645 4861 scope.go:117] "RemoveContainer" containerID="51e52a5d8b34952c802563cbe111c7a8e57b4c8a82f4fb7d1869a577154fda05" Jan 29 09:00:36 crc kubenswrapper[4861]: I0129 09:00:36.116430 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 09:00:36 crc kubenswrapper[4861]: E0129 09:00:36.117126 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:00:47 crc kubenswrapper[4861]: I0129 09:00:47.116794 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 09:00:47 crc kubenswrapper[4861]: E0129 09:00:47.118049 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:00:48 crc kubenswrapper[4861]: I0129 09:00:48.783249 4861 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="3da8e0ee-9c1a-4557-a727-14de15187b68" containerName="galera" probeResult="failure" output="command timed out" Jan 29 09:00:48 crc kubenswrapper[4861]: I0129 09:00:48.783633 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="3da8e0ee-9c1a-4557-a727-14de15187b68" containerName="galera" probeResult="failure" output="command timed out" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.157745 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29494621-thqv2"] Jan 29 09:01:00 crc kubenswrapper[4861]: E0129 09:01:00.158870 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" containerName="extract-content" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.158889 
4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" containerName="extract-content" Jan 29 09:01:00 crc kubenswrapper[4861]: E0129 09:01:00.158921 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9650c53-f61e-4634-a388-0345ddd08c6a" containerName="collect-profiles" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.158929 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9650c53-f61e-4634-a388-0345ddd08c6a" containerName="collect-profiles" Jan 29 09:01:00 crc kubenswrapper[4861]: E0129 09:01:00.158947 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" containerName="extract-utilities" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.158954 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" containerName="extract-utilities" Jan 29 09:01:00 crc kubenswrapper[4861]: E0129 09:01:00.158969 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" containerName="registry-server" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.158976 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" containerName="registry-server" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.159276 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9650c53-f61e-4634-a388-0345ddd08c6a" containerName="collect-profiles" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.159294 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="acb8a0a4-b178-47a2-9d96-0ecf94abf560" containerName="registry-server" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.160218 4861 util.go:30] "No sandbox for pod can be found. 
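The RemoveStaleState burst above is triggered by admitting the new keystone-cron pod: the CPU and memory managers purge checkpointed assignments for containers whose pods no longer exist (the deleted community-operators-vdprb and collect-profiles pods). A minimal sketch of that cleanup, with hypothetical types:

    // Hypothetical types: podUID -> containerName -> pinned CPU set.
    package main

    import "fmt"

    type assignments map[string]map[string]string

    func removeStaleState(st assignments, activePods map[string]bool) {
    	for podUID, containers := range st {
    		if activePods[podUID] {
    			continue
    		}
    		for name := range containers {
    			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, name)
    		}
    		delete(st, podUID) // "Deleted CPUSet assignment"
    	}
    }

    func main() {
    	st := assignments{"acb8a0a4-b178-47a2-9d96-0ecf94abf560": {"registry-server": "0-3"}}
    	removeStaleState(st, map[string]bool{}) // no active pods -> everything is stale
    	fmt.Println(len(st))                    // 0
    }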
Need to start a new one" pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.178268 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29494621-thqv2"] Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.294917 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-combined-ca-bundle\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.295060 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdfts\" (UniqueName: \"kubernetes.io/projected/0a51c126-e023-43a7-91a1-4d806f2adb73-kube-api-access-xdfts\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.295279 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-config-data\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.295454 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-fernet-keys\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.397562 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdfts\" (UniqueName: \"kubernetes.io/projected/0a51c126-e023-43a7-91a1-4d806f2adb73-kube-api-access-xdfts\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.398240 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-config-data\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.399692 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-fernet-keys\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.399990 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-combined-ca-bundle\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.405983 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-fernet-keys\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.406353 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-combined-ca-bundle\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.413995 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-config-data\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.415367 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdfts\" (UniqueName: \"kubernetes.io/projected/0a51c126-e023-43a7-91a1-4d806f2adb73-kube-api-access-xdfts\") pod \"keystone-cron-29494621-thqv2\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.494744 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:00 crc kubenswrapper[4861]: I0129 09:01:00.971222 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29494621-thqv2"] Jan 29 09:01:01 crc kubenswrapper[4861]: I0129 09:01:01.116702 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 09:01:01 crc kubenswrapper[4861]: I0129 09:01:01.911286 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29494621-thqv2" event={"ID":"0a51c126-e023-43a7-91a1-4d806f2adb73","Type":"ContainerStarted","Data":"b5aaf65d081413f1a2ba05b90e2e49b57859468594e5224ef69acf467cc5f8f0"} Jan 29 09:01:01 crc kubenswrapper[4861]: I0129 09:01:01.911870 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29494621-thqv2" event={"ID":"0a51c126-e023-43a7-91a1-4d806f2adb73","Type":"ContainerStarted","Data":"4c440eff4064ebe70b8c9e98845efdc2efa8cb7e514c59909ff4a048b01384f3"} Jan 29 09:01:01 crc kubenswrapper[4861]: I0129 09:01:01.917362 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"61395d172ec3f374d64d044677c1415cf92c79889300b56f1ecd0883f2f5ce33"} Jan 29 09:01:01 crc kubenswrapper[4861]: I0129 09:01:01.945551 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29494621-thqv2" podStartSLOduration=1.9455225440000001 podStartE2EDuration="1.945522544s" podCreationTimestamp="2026-01-29 09:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 09:01:01.937946544 +0000 UTC m=+8753.609441121" watchObservedRunningTime="2026-01-29 09:01:01.945522544 +0000 UTC m=+8753.617017111" Jan 29 09:01:04 crc kubenswrapper[4861]: I0129 09:01:04.955502 4861 generic.go:334] 
"Generic (PLEG): container finished" podID="0a51c126-e023-43a7-91a1-4d806f2adb73" containerID="b5aaf65d081413f1a2ba05b90e2e49b57859468594e5224ef69acf467cc5f8f0" exitCode=0 Jan 29 09:01:04 crc kubenswrapper[4861]: I0129 09:01:04.955610 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29494621-thqv2" event={"ID":"0a51c126-e023-43a7-91a1-4d806f2adb73","Type":"ContainerDied","Data":"b5aaf65d081413f1a2ba05b90e2e49b57859468594e5224ef69acf467cc5f8f0"} Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.308320 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.344439 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-fernet-keys\") pod \"0a51c126-e023-43a7-91a1-4d806f2adb73\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.344694 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdfts\" (UniqueName: \"kubernetes.io/projected/0a51c126-e023-43a7-91a1-4d806f2adb73-kube-api-access-xdfts\") pod \"0a51c126-e023-43a7-91a1-4d806f2adb73\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.344731 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-config-data\") pod \"0a51c126-e023-43a7-91a1-4d806f2adb73\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.344805 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-combined-ca-bundle\") pod \"0a51c126-e023-43a7-91a1-4d806f2adb73\" (UID: \"0a51c126-e023-43a7-91a1-4d806f2adb73\") " Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.353471 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a51c126-e023-43a7-91a1-4d806f2adb73-kube-api-access-xdfts" (OuterVolumeSpecName: "kube-api-access-xdfts") pod "0a51c126-e023-43a7-91a1-4d806f2adb73" (UID: "0a51c126-e023-43a7-91a1-4d806f2adb73"). InnerVolumeSpecName "kube-api-access-xdfts". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.372254 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "0a51c126-e023-43a7-91a1-4d806f2adb73" (UID: "0a51c126-e023-43a7-91a1-4d806f2adb73"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.391294 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a51c126-e023-43a7-91a1-4d806f2adb73" (UID: "0a51c126-e023-43a7-91a1-4d806f2adb73"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.429284 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-config-data" (OuterVolumeSpecName: "config-data") pod "0a51c126-e023-43a7-91a1-4d806f2adb73" (UID: "0a51c126-e023-43a7-91a1-4d806f2adb73"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.447358 4861 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.447389 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdfts\" (UniqueName: \"kubernetes.io/projected/0a51c126-e023-43a7-91a1-4d806f2adb73-kube-api-access-xdfts\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.447420 4861 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.447428 4861 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a51c126-e023-43a7-91a1-4d806f2adb73-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.974952 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29494621-thqv2" event={"ID":"0a51c126-e023-43a7-91a1-4d806f2adb73","Type":"ContainerDied","Data":"4c440eff4064ebe70b8c9e98845efdc2efa8cb7e514c59909ff4a048b01384f3"} Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.974987 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c440eff4064ebe70b8c9e98845efdc2efa8cb7e514c59909ff4a048b01384f3" Jan 29 09:01:06 crc kubenswrapper[4861]: I0129 09:01:06.975009 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29494621-thqv2" Jan 29 09:01:11 crc kubenswrapper[4861]: I0129 09:01:11.016599 4861 generic.go:334] "Generic (PLEG): container finished" podID="5a7fa956-9445-4483-b3c5-9d0548f8f2b4" containerID="b9ac14e303fbf6f5605bc55e6b948d173cf8d5acb179b3fc0b4dbbe6cec486c6" exitCode=0 Jan 29 09:01:11 crc kubenswrapper[4861]: I0129 09:01:11.016808 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds" event={"ID":"5a7fa956-9445-4483-b3c5-9d0548f8f2b4","Type":"ContainerDied","Data":"b9ac14e303fbf6f5605bc55e6b948d173cf8d5acb179b3fc0b4dbbe6cec486c6"} Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.462336 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.516256 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-0\") pod \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.516341 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-inventory\") pod \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.516375 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-0\") pod \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.516401 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-1\") pod \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.516428 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-combined-ca-bundle\") pod \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.516462 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pc6w\" (UniqueName: \"kubernetes.io/projected/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-kube-api-access-4pc6w\") pod \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.516499 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-ssh-key-openstack-cell1\") pod \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.516592 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cells-global-config-0\") pod \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.516759 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-1\") pod \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\" (UID: \"5a7fa956-9445-4483-b3c5-9d0548f8f2b4\") " Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.522366 4861 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "5a7fa956-9445-4483-b3c5-9d0548f8f2b4" (UID: "5a7fa956-9445-4483-b3c5-9d0548f8f2b4"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.535627 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-kube-api-access-4pc6w" (OuterVolumeSpecName: "kube-api-access-4pc6w") pod "5a7fa956-9445-4483-b3c5-9d0548f8f2b4" (UID: "5a7fa956-9445-4483-b3c5-9d0548f8f2b4"). InnerVolumeSpecName "kube-api-access-4pc6w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.554415 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "5a7fa956-9445-4483-b3c5-9d0548f8f2b4" (UID: "5a7fa956-9445-4483-b3c5-9d0548f8f2b4"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.560586 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "5a7fa956-9445-4483-b3c5-9d0548f8f2b4" (UID: "5a7fa956-9445-4483-b3c5-9d0548f8f2b4"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.569181 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-inventory" (OuterVolumeSpecName: "inventory") pod "5a7fa956-9445-4483-b3c5-9d0548f8f2b4" (UID: "5a7fa956-9445-4483-b3c5-9d0548f8f2b4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.571364 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "5a7fa956-9445-4483-b3c5-9d0548f8f2b4" (UID: "5a7fa956-9445-4483-b3c5-9d0548f8f2b4"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.572347 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "5a7fa956-9445-4483-b3c5-9d0548f8f2b4" (UID: "5a7fa956-9445-4483-b3c5-9d0548f8f2b4"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.583505 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "5a7fa956-9445-4483-b3c5-9d0548f8f2b4" (UID: "5a7fa956-9445-4483-b3c5-9d0548f8f2b4"). 
InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.596581 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "5a7fa956-9445-4483-b3c5-9d0548f8f2b4" (UID: "5a7fa956-9445-4483-b3c5-9d0548f8f2b4"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.619561 4861 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.619955 4861 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-inventory\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.620160 4861 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.620245 4861 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.620347 4861 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.620433 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pc6w\" (UniqueName: \"kubernetes.io/projected/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-kube-api-access-4pc6w\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.620511 4861 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.620592 4861 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:12 crc kubenswrapper[4861]: I0129 09:01:12.620648 4861 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/5a7fa956-9445-4483-b3c5-9d0548f8f2b4-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:13 crc kubenswrapper[4861]: I0129 09:01:13.040397 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds" event={"ID":"5a7fa956-9445-4483-b3c5-9d0548f8f2b4","Type":"ContainerDied","Data":"282e713ef961651737cb3c91c813fc0f0e7b6137a18bda975751c701fb4e7f69"} Jan 29 09:01:13 crc kubenswrapper[4861]: I0129 09:01:13.040948 4861 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="282e713ef961651737cb3c91c813fc0f0e7b6137a18bda975751c701fb4e7f69" Jan 29 09:01:13 crc kubenswrapper[4861]: I0129 09:01:13.040451 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.418519 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dckbb"] Jan 29 09:01:25 crc kubenswrapper[4861]: E0129 09:01:25.419388 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a51c126-e023-43a7-91a1-4d806f2adb73" containerName="keystone-cron" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.419401 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a51c126-e023-43a7-91a1-4d806f2adb73" containerName="keystone-cron" Jan 29 09:01:25 crc kubenswrapper[4861]: E0129 09:01:25.419432 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a7fa956-9445-4483-b3c5-9d0548f8f2b4" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.419440 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a7fa956-9445-4483-b3c5-9d0548f8f2b4" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.419664 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a51c126-e023-43a7-91a1-4d806f2adb73" containerName="keystone-cron" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.419683 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a7fa956-9445-4483-b3c5-9d0548f8f2b4" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.421286 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.432527 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dckbb"] Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.520294 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-utilities\") pod \"redhat-operators-dckbb\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.520400 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9kx7\" (UniqueName: \"kubernetes.io/projected/b5204c41-181c-47e0-a2df-d1e718700e8d-kube-api-access-x9kx7\") pod \"redhat-operators-dckbb\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.520471 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-catalog-content\") pod \"redhat-operators-dckbb\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.622797 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9kx7\" (UniqueName: \"kubernetes.io/projected/b5204c41-181c-47e0-a2df-d1e718700e8d-kube-api-access-x9kx7\") pod \"redhat-operators-dckbb\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.623166 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-catalog-content\") pod \"redhat-operators-dckbb\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.623278 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-utilities\") pod \"redhat-operators-dckbb\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.623641 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-catalog-content\") pod \"redhat-operators-dckbb\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.624039 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-utilities\") pod \"redhat-operators-dckbb\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.645884 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-x9kx7\" (UniqueName: \"kubernetes.io/projected/b5204c41-181c-47e0-a2df-d1e718700e8d-kube-api-access-x9kx7\") pod \"redhat-operators-dckbb\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:25 crc kubenswrapper[4861]: I0129 09:01:25.770994 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:26 crc kubenswrapper[4861]: I0129 09:01:26.271334 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dckbb"] Jan 29 09:01:26 crc kubenswrapper[4861]: W0129 09:01:26.285349 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5204c41_181c_47e0_a2df_d1e718700e8d.slice/crio-4a02a7530c8b39e53a0bb84aef8f6b89ac0e90ec9c68901e47e3eded0c7905e4 WatchSource:0}: Error finding container 4a02a7530c8b39e53a0bb84aef8f6b89ac0e90ec9c68901e47e3eded0c7905e4: Status 404 returned error can't find the container with id 4a02a7530c8b39e53a0bb84aef8f6b89ac0e90ec9c68901e47e3eded0c7905e4 Jan 29 09:01:27 crc kubenswrapper[4861]: I0129 09:01:27.188197 4861 generic.go:334] "Generic (PLEG): container finished" podID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerID="878602ba9d3946c6d0f21a2ca30336044f53eb4f4170e5bdac6df22c1abf4413" exitCode=0 Jan 29 09:01:27 crc kubenswrapper[4861]: I0129 09:01:27.188544 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dckbb" event={"ID":"b5204c41-181c-47e0-a2df-d1e718700e8d","Type":"ContainerDied","Data":"878602ba9d3946c6d0f21a2ca30336044f53eb4f4170e5bdac6df22c1abf4413"} Jan 29 09:01:27 crc kubenswrapper[4861]: I0129 09:01:27.188579 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dckbb" event={"ID":"b5204c41-181c-47e0-a2df-d1e718700e8d","Type":"ContainerStarted","Data":"4a02a7530c8b39e53a0bb84aef8f6b89ac0e90ec9c68901e47e3eded0c7905e4"} Jan 29 09:01:28 crc kubenswrapper[4861]: I0129 09:01:28.200140 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dckbb" event={"ID":"b5204c41-181c-47e0-a2df-d1e718700e8d","Type":"ContainerStarted","Data":"545eca76397d73fd0f0b19d174b165f77ba97dbbc9388fafec08039c0e9f08ca"} Jan 29 09:01:34 crc kubenswrapper[4861]: I0129 09:01:34.264830 4861 generic.go:334] "Generic (PLEG): container finished" podID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerID="545eca76397d73fd0f0b19d174b165f77ba97dbbc9388fafec08039c0e9f08ca" exitCode=0 Jan 29 09:01:34 crc kubenswrapper[4861]: I0129 09:01:34.264925 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dckbb" event={"ID":"b5204c41-181c-47e0-a2df-d1e718700e8d","Type":"ContainerDied","Data":"545eca76397d73fd0f0b19d174b165f77ba97dbbc9388fafec08039c0e9f08ca"} Jan 29 09:01:35 crc kubenswrapper[4861]: I0129 09:01:35.279122 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dckbb" event={"ID":"b5204c41-181c-47e0-a2df-d1e718700e8d","Type":"ContainerStarted","Data":"3396a17bcc4207f45c6876a11ea95b51c44f861956820b1818c54bb800ed758d"} Jan 29 09:01:35 crc kubenswrapper[4861]: I0129 09:01:35.304843 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dckbb" podStartSLOduration=2.84913301 podStartE2EDuration="10.304820508s" 
podCreationTimestamp="2026-01-29 09:01:25 +0000 UTC" firstStartedPulling="2026-01-29 09:01:27.191053515 +0000 UTC m=+8778.862548062" lastFinishedPulling="2026-01-29 09:01:34.646741003 +0000 UTC m=+8786.318235560" observedRunningTime="2026-01-29 09:01:35.295221434 +0000 UTC m=+8786.966716011" watchObservedRunningTime="2026-01-29 09:01:35.304820508 +0000 UTC m=+8786.976315065" Jan 29 09:01:35 crc kubenswrapper[4861]: I0129 09:01:35.771908 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:35 crc kubenswrapper[4861]: I0129 09:01:35.773466 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:36 crc kubenswrapper[4861]: I0129 09:01:36.824351 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dckbb" podUID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerName="registry-server" probeResult="failure" output=< Jan 29 09:01:36 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 09:01:36 crc kubenswrapper[4861]: > Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.440596 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vbhhx"] Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.444392 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.455776 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vbhhx"] Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.572213 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hv5xr\" (UniqueName: \"kubernetes.io/projected/3201e849-a0a7-4290-aa9f-83a0b5899f57-kube-api-access-hv5xr\") pod \"certified-operators-vbhhx\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.572602 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-utilities\") pod \"certified-operators-vbhhx\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.572694 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-catalog-content\") pod \"certified-operators-vbhhx\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.674826 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hv5xr\" (UniqueName: \"kubernetes.io/projected/3201e849-a0a7-4290-aa9f-83a0b5899f57-kube-api-access-hv5xr\") pod \"certified-operators-vbhhx\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.674926 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-utilities\") pod \"certified-operators-vbhhx\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.675018 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-catalog-content\") pod \"certified-operators-vbhhx\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.675716 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-catalog-content\") pod \"certified-operators-vbhhx\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.676239 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-utilities\") pod \"certified-operators-vbhhx\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.698136 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hv5xr\" (UniqueName: \"kubernetes.io/projected/3201e849-a0a7-4290-aa9f-83a0b5899f57-kube-api-access-hv5xr\") pod \"certified-operators-vbhhx\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.782401 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.873564 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:45 crc kubenswrapper[4861]: I0129 09:01:45.945890 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:46 crc kubenswrapper[4861]: I0129 09:01:46.450689 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vbhhx"] Jan 29 09:01:47 crc kubenswrapper[4861]: I0129 09:01:47.429320 4861 generic.go:334] "Generic (PLEG): container finished" podID="3201e849-a0a7-4290-aa9f-83a0b5899f57" containerID="1ae48142c33e84283eb5707fb98d8ce25e47cb53b21ac35e270ac540347ea193" exitCode=0 Jan 29 09:01:47 crc kubenswrapper[4861]: I0129 09:01:47.429357 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vbhhx" event={"ID":"3201e849-a0a7-4290-aa9f-83a0b5899f57","Type":"ContainerDied","Data":"1ae48142c33e84283eb5707fb98d8ce25e47cb53b21ac35e270ac540347ea193"} Jan 29 09:01:47 crc kubenswrapper[4861]: I0129 09:01:47.429380 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vbhhx" event={"ID":"3201e849-a0a7-4290-aa9f-83a0b5899f57","Type":"ContainerStarted","Data":"b9115442d52f5332d2a3d490197e9b1140f4eeea5dec3a3752c65fa14407e5bd"} Jan 29 09:01:47 crc kubenswrapper[4861]: I0129 09:01:47.432007 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.224030 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dckbb"] Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.224858 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dckbb" podUID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerName="registry-server" containerID="cri-o://3396a17bcc4207f45c6876a11ea95b51c44f861956820b1818c54bb800ed758d" gracePeriod=2 Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.441135 4861 generic.go:334] "Generic (PLEG): container finished" podID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerID="3396a17bcc4207f45c6876a11ea95b51c44f861956820b1818c54bb800ed758d" exitCode=0 Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.441175 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dckbb" event={"ID":"b5204c41-181c-47e0-a2df-d1e718700e8d","Type":"ContainerDied","Data":"3396a17bcc4207f45c6876a11ea95b51c44f861956820b1818c54bb800ed758d"} Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.721658 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.757622 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-catalog-content\") pod \"b5204c41-181c-47e0-a2df-d1e718700e8d\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.757801 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-utilities\") pod \"b5204c41-181c-47e0-a2df-d1e718700e8d\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.757953 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9kx7\" (UniqueName: \"kubernetes.io/projected/b5204c41-181c-47e0-a2df-d1e718700e8d-kube-api-access-x9kx7\") pod \"b5204c41-181c-47e0-a2df-d1e718700e8d\" (UID: \"b5204c41-181c-47e0-a2df-d1e718700e8d\") " Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.758698 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-utilities" (OuterVolumeSpecName: "utilities") pod "b5204c41-181c-47e0-a2df-d1e718700e8d" (UID: "b5204c41-181c-47e0-a2df-d1e718700e8d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.763896 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5204c41-181c-47e0-a2df-d1e718700e8d-kube-api-access-x9kx7" (OuterVolumeSpecName: "kube-api-access-x9kx7") pod "b5204c41-181c-47e0-a2df-d1e718700e8d" (UID: "b5204c41-181c-47e0-a2df-d1e718700e8d"). InnerVolumeSpecName "kube-api-access-x9kx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.860405 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.860654 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9kx7\" (UniqueName: \"kubernetes.io/projected/b5204c41-181c-47e0-a2df-d1e718700e8d-kube-api-access-x9kx7\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.886274 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b5204c41-181c-47e0-a2df-d1e718700e8d" (UID: "b5204c41-181c-47e0-a2df-d1e718700e8d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:01:48 crc kubenswrapper[4861]: I0129 09:01:48.963006 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5204c41-181c-47e0-a2df-d1e718700e8d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:49 crc kubenswrapper[4861]: I0129 09:01:49.454507 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vbhhx" event={"ID":"3201e849-a0a7-4290-aa9f-83a0b5899f57","Type":"ContainerStarted","Data":"a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34"} Jan 29 09:01:49 crc kubenswrapper[4861]: I0129 09:01:49.457776 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dckbb" event={"ID":"b5204c41-181c-47e0-a2df-d1e718700e8d","Type":"ContainerDied","Data":"4a02a7530c8b39e53a0bb84aef8f6b89ac0e90ec9c68901e47e3eded0c7905e4"} Jan 29 09:01:49 crc kubenswrapper[4861]: I0129 09:01:49.457814 4861 scope.go:117] "RemoveContainer" containerID="3396a17bcc4207f45c6876a11ea95b51c44f861956820b1818c54bb800ed758d" Jan 29 09:01:49 crc kubenswrapper[4861]: I0129 09:01:49.457856 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dckbb" Jan 29 09:01:49 crc kubenswrapper[4861]: I0129 09:01:49.499734 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dckbb"] Jan 29 09:01:49 crc kubenswrapper[4861]: I0129 09:01:49.501368 4861 scope.go:117] "RemoveContainer" containerID="545eca76397d73fd0f0b19d174b165f77ba97dbbc9388fafec08039c0e9f08ca" Jan 29 09:01:49 crc kubenswrapper[4861]: I0129 09:01:49.510865 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dckbb"] Jan 29 09:01:49 crc kubenswrapper[4861]: I0129 09:01:49.521823 4861 scope.go:117] "RemoveContainer" containerID="878602ba9d3946c6d0f21a2ca30336044f53eb4f4170e5bdac6df22c1abf4413" Jan 29 09:01:50 crc kubenswrapper[4861]: I0129 09:01:50.471013 4861 generic.go:334] "Generic (PLEG): container finished" podID="3201e849-a0a7-4290-aa9f-83a0b5899f57" containerID="a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34" exitCode=0 Jan 29 09:01:50 crc kubenswrapper[4861]: I0129 09:01:50.471127 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vbhhx" event={"ID":"3201e849-a0a7-4290-aa9f-83a0b5899f57","Type":"ContainerDied","Data":"a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34"} Jan 29 09:01:51 crc kubenswrapper[4861]: I0129 09:01:51.127696 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5204c41-181c-47e0-a2df-d1e718700e8d" path="/var/lib/kubelet/pods/b5204c41-181c-47e0-a2df-d1e718700e8d/volumes" Jan 29 09:01:51 crc kubenswrapper[4861]: I0129 09:01:51.484233 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vbhhx" event={"ID":"3201e849-a0a7-4290-aa9f-83a0b5899f57","Type":"ContainerStarted","Data":"722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd"} Jan 29 09:01:51 crc kubenswrapper[4861]: I0129 09:01:51.509777 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vbhhx" podStartSLOduration=3.085453066 podStartE2EDuration="6.509754572s" podCreationTimestamp="2026-01-29 09:01:45 +0000 UTC" firstStartedPulling="2026-01-29 09:01:47.431694917 
+0000 UTC m=+8799.103189474" lastFinishedPulling="2026-01-29 09:01:50.855996423 +0000 UTC m=+8802.527490980" observedRunningTime="2026-01-29 09:01:51.501466353 +0000 UTC m=+8803.172960930" watchObservedRunningTime="2026-01-29 09:01:51.509754572 +0000 UTC m=+8803.181249129" Jan 29 09:01:55 crc kubenswrapper[4861]: I0129 09:01:55.782590 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:55 crc kubenswrapper[4861]: I0129 09:01:55.783744 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:55 crc kubenswrapper[4861]: I0129 09:01:55.847060 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:56 crc kubenswrapper[4861]: I0129 09:01:56.598408 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:57 crc kubenswrapper[4861]: I0129 09:01:57.019498 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vbhhx"] Jan 29 09:01:58 crc kubenswrapper[4861]: I0129 09:01:58.557211 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vbhhx" podUID="3201e849-a0a7-4290-aa9f-83a0b5899f57" containerName="registry-server" containerID="cri-o://722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd" gracePeriod=2 Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.035880 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vbhhx" Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.161948 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-utilities\") pod \"3201e849-a0a7-4290-aa9f-83a0b5899f57\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.161994 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hv5xr\" (UniqueName: \"kubernetes.io/projected/3201e849-a0a7-4290-aa9f-83a0b5899f57-kube-api-access-hv5xr\") pod \"3201e849-a0a7-4290-aa9f-83a0b5899f57\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.162052 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-catalog-content\") pod \"3201e849-a0a7-4290-aa9f-83a0b5899f57\" (UID: \"3201e849-a0a7-4290-aa9f-83a0b5899f57\") " Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.163375 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-utilities" (OuterVolumeSpecName: "utilities") pod "3201e849-a0a7-4290-aa9f-83a0b5899f57" (UID: "3201e849-a0a7-4290-aa9f-83a0b5899f57"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.170986 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3201e849-a0a7-4290-aa9f-83a0b5899f57-kube-api-access-hv5xr" (OuterVolumeSpecName: "kube-api-access-hv5xr") pod "3201e849-a0a7-4290-aa9f-83a0b5899f57" (UID: "3201e849-a0a7-4290-aa9f-83a0b5899f57"). InnerVolumeSpecName "kube-api-access-hv5xr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.264799 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.265099 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hv5xr\" (UniqueName: \"kubernetes.io/projected/3201e849-a0a7-4290-aa9f-83a0b5899f57-kube-api-access-hv5xr\") on node \"crc\" DevicePath \"\"" Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.569279 4861 generic.go:334] "Generic (PLEG): container finished" podID="3201e849-a0a7-4290-aa9f-83a0b5899f57" containerID="722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd" exitCode=0 Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.569331 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vbhhx" event={"ID":"3201e849-a0a7-4290-aa9f-83a0b5899f57","Type":"ContainerDied","Data":"722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd"} Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.569367 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vbhhx" event={"ID":"3201e849-a0a7-4290-aa9f-83a0b5899f57","Type":"ContainerDied","Data":"b9115442d52f5332d2a3d490197e9b1140f4eeea5dec3a3752c65fa14407e5bd"} Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.569366 4861 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.569366 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vbhhx"
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.569456 4861 scope.go:117] "RemoveContainer" containerID="722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd"
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.610986 4861 scope.go:117] "RemoveContainer" containerID="a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34"
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.632101 4861 scope.go:117] "RemoveContainer" containerID="1ae48142c33e84283eb5707fb98d8ce25e47cb53b21ac35e270ac540347ea193"
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.672664 4861 scope.go:117] "RemoveContainer" containerID="722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd"
Jan 29 09:01:59 crc kubenswrapper[4861]: E0129 09:01:59.673018 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd\": container with ID starting with 722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd not found: ID does not exist" containerID="722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd"
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.673054 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd"} err="failed to get container status \"722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd\": rpc error: code = NotFound desc = could not find container \"722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd\": container with ID starting with 722db941278b616cbb33a06c7b6dccf415be34c7d43b198de36429c939a791cd not found: ID does not exist"
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.673090 4861 scope.go:117] "RemoveContainer" containerID="a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34"
Jan 29 09:01:59 crc kubenswrapper[4861]: E0129 09:01:59.673584 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34\": container with ID starting with a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34 not found: ID does not exist" containerID="a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34"
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.673605 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34"} err="failed to get container status \"a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34\": rpc error: code = NotFound desc = could not find container \"a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34\": container with ID starting with a7376380234852e8edf7bd8f4f388d9703d44865c7f70ecf21c3f79ee6e13a34 not found: ID does not exist"
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.673619 4861 scope.go:117] "RemoveContainer" containerID="1ae48142c33e84283eb5707fb98d8ce25e47cb53b21ac35e270ac540347ea193"
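The E-severity entries above are the kubelet re-issuing RemoveContainer for containers cri-o has already deleted; the NotFound answer is expected cleanup churn rather than a real failure. A sketch that filters E-lines down to the unexpected ones; the "benign" pattern is an assumption keyed to the exact message text seen here:

```python
import re

SEVERITY = re.compile(r'kubenswrapper\[\d+\]: ([IWE])\d{4} ')
BENIGN_NOTFOUND = re.compile(r'code = NotFound desc = could not find container')

def unexpected_errors(lines):
    """Yield E-severity entries other than post-deletion NotFound churn."""
    for line in lines:
        m = SEVERITY.search(line)
        if m and m.group(1) == "E" and not BENIGN_NOTFOUND.search(line):
            yield line
```

Applied to this stretch it still yields the cpu_manager RemoveStaleState lines below, which klog also emits at E severity during routine state cleanup.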
Jan 29 09:01:59 crc kubenswrapper[4861]: E0129 09:01:59.673935 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ae48142c33e84283eb5707fb98d8ce25e47cb53b21ac35e270ac540347ea193\": container with ID starting with 1ae48142c33e84283eb5707fb98d8ce25e47cb53b21ac35e270ac540347ea193 not found: ID does not exist" containerID="1ae48142c33e84283eb5707fb98d8ce25e47cb53b21ac35e270ac540347ea193"
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.673985 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ae48142c33e84283eb5707fb98d8ce25e47cb53b21ac35e270ac540347ea193"} err="failed to get container status \"1ae48142c33e84283eb5707fb98d8ce25e47cb53b21ac35e270ac540347ea193\": rpc error: code = NotFound desc = could not find container \"1ae48142c33e84283eb5707fb98d8ce25e47cb53b21ac35e270ac540347ea193\": container with ID starting with 1ae48142c33e84283eb5707fb98d8ce25e47cb53b21ac35e270ac540347ea193 not found: ID does not exist"
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.883139 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3201e849-a0a7-4290-aa9f-83a0b5899f57" (UID: "3201e849-a0a7-4290-aa9f-83a0b5899f57"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 09:01:59 crc kubenswrapper[4861]: I0129 09:01:59.981271 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3201e849-a0a7-4290-aa9f-83a0b5899f57-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 09:02:00 crc kubenswrapper[4861]: I0129 09:02:00.210823 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vbhhx"]
Jan 29 09:02:00 crc kubenswrapper[4861]: I0129 09:02:00.221755 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vbhhx"]
Jan 29 09:02:01 crc kubenswrapper[4861]: I0129 09:02:01.128055 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3201e849-a0a7-4290-aa9f-83a0b5899f57" path="/var/lib/kubelet/pods/3201e849-a0a7-4290-aa9f-83a0b5899f57/volumes"
Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.173926 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ntwz2"]
Jan 29 09:02:58 crc kubenswrapper[4861]: E0129 09:02:58.174999 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerName="registry-server"
Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.175016 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerName="registry-server"
Jan 29 09:02:58 crc kubenswrapper[4861]: E0129 09:02:58.175041 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3201e849-a0a7-4290-aa9f-83a0b5899f57" containerName="registry-server"
Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.175049 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3201e849-a0a7-4290-aa9f-83a0b5899f57" containerName="registry-server"
Jan 29 09:02:58 crc kubenswrapper[4861]: E0129 09:02:58.175065 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3201e849-a0a7-4290-aa9f-83a0b5899f57" containerName="extract-content"
Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.175093 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3201e849-a0a7-4290-aa9f-83a0b5899f57" containerName="extract-content"
Jan 29 09:02:58 crc kubenswrapper[4861]: E0129 09:02:58.175128 4861 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerName="extract-content" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.175136 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerName="extract-content" Jan 29 09:02:58 crc kubenswrapper[4861]: E0129 09:02:58.175157 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerName="extract-utilities" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.175164 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerName="extract-utilities" Jan 29 09:02:58 crc kubenswrapper[4861]: E0129 09:02:58.175182 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3201e849-a0a7-4290-aa9f-83a0b5899f57" containerName="extract-utilities" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.175190 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="3201e849-a0a7-4290-aa9f-83a0b5899f57" containerName="extract-utilities" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.175421 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="3201e849-a0a7-4290-aa9f-83a0b5899f57" containerName="registry-server" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.175440 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5204c41-181c-47e0-a2df-d1e718700e8d" containerName="registry-server" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.177275 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.195719 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ntwz2"] Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.280351 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-catalog-content\") pod \"redhat-marketplace-ntwz2\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.280425 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-utilities\") pod \"redhat-marketplace-ntwz2\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.280462 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26t89\" (UniqueName: \"kubernetes.io/projected/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-kube-api-access-26t89\") pod \"redhat-marketplace-ntwz2\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.382296 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-catalog-content\") pod \"redhat-marketplace-ntwz2\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 
09:02:58.382415 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-utilities\") pod \"redhat-marketplace-ntwz2\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.382851 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-catalog-content\") pod \"redhat-marketplace-ntwz2\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.382961 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-utilities\") pod \"redhat-marketplace-ntwz2\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.383130 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26t89\" (UniqueName: \"kubernetes.io/projected/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-kube-api-access-26t89\") pod \"redhat-marketplace-ntwz2\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.403302 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26t89\" (UniqueName: \"kubernetes.io/projected/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-kube-api-access-26t89\") pod \"redhat-marketplace-ntwz2\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:02:58 crc kubenswrapper[4861]: I0129 09:02:58.506492 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:02:59 crc kubenswrapper[4861]: I0129 09:02:59.035253 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ntwz2"] Jan 29 09:02:59 crc kubenswrapper[4861]: I0129 09:02:59.185121 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntwz2" event={"ID":"8f0934bb-04b2-44c9-ad9a-55b23fc88a03","Type":"ContainerStarted","Data":"6f6deb8983805ae6988f308aa67dd7bd57ab1a97cb342af61e0116af81504328"} Jan 29 09:03:00 crc kubenswrapper[4861]: I0129 09:03:00.195901 4861 generic.go:334] "Generic (PLEG): container finished" podID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" containerID="6ea2743c7b360e500e268ccc4f0260ab0ccf1f8f8d3f616ae4ce9af342842052" exitCode=0 Jan 29 09:03:00 crc kubenswrapper[4861]: I0129 09:03:00.195967 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntwz2" event={"ID":"8f0934bb-04b2-44c9-ad9a-55b23fc88a03","Type":"ContainerDied","Data":"6ea2743c7b360e500e268ccc4f0260ab0ccf1f8f8d3f616ae4ce9af342842052"} Jan 29 09:03:01 crc kubenswrapper[4861]: I0129 09:03:01.207115 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntwz2" event={"ID":"8f0934bb-04b2-44c9-ad9a-55b23fc88a03","Type":"ContainerStarted","Data":"5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1"} Jan 29 09:03:02 crc kubenswrapper[4861]: I0129 09:03:02.219209 4861 generic.go:334] "Generic (PLEG): container finished" podID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" containerID="5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1" exitCode=0 Jan 29 09:03:02 crc kubenswrapper[4861]: I0129 09:03:02.219369 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntwz2" event={"ID":"8f0934bb-04b2-44c9-ad9a-55b23fc88a03","Type":"ContainerDied","Data":"5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1"} Jan 29 09:03:03 crc kubenswrapper[4861]: I0129 09:03:03.229878 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntwz2" event={"ID":"8f0934bb-04b2-44c9-ad9a-55b23fc88a03","Type":"ContainerStarted","Data":"24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b"} Jan 29 09:03:03 crc kubenswrapper[4861]: I0129 09:03:03.252330 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ntwz2" podStartSLOduration=2.578171485 podStartE2EDuration="5.252312341s" podCreationTimestamp="2026-01-29 09:02:58 +0000 UTC" firstStartedPulling="2026-01-29 09:03:00.198459293 +0000 UTC m=+8871.869953840" lastFinishedPulling="2026-01-29 09:03:02.872600129 +0000 UTC m=+8874.544094696" observedRunningTime="2026-01-29 09:03:03.247494513 +0000 UTC m=+8874.918989080" watchObservedRunningTime="2026-01-29 09:03:03.252312341 +0000 UTC m=+8874.923806898" Jan 29 09:03:08 crc kubenswrapper[4861]: I0129 09:03:08.506587 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:03:08 crc kubenswrapper[4861]: I0129 09:03:08.507186 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:03:08 crc kubenswrapper[4861]: I0129 09:03:08.559802 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:03:09 crc kubenswrapper[4861]: I0129 09:03:09.332351 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:03:09 crc kubenswrapper[4861]: I0129 09:03:09.377619 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ntwz2"] Jan 29 09:03:11 crc kubenswrapper[4861]: I0129 09:03:11.308841 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ntwz2" podUID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" containerName="registry-server" containerID="cri-o://24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b" gracePeriod=2 Jan 29 09:03:11 crc kubenswrapper[4861]: I0129 09:03:11.833262 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:03:11 crc kubenswrapper[4861]: I0129 09:03:11.891692 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-utilities\") pod \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " Jan 29 09:03:11 crc kubenswrapper[4861]: I0129 09:03:11.891795 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-catalog-content\") pod \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " Jan 29 09:03:11 crc kubenswrapper[4861]: I0129 09:03:11.891966 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26t89\" (UniqueName: \"kubernetes.io/projected/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-kube-api-access-26t89\") pod \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\" (UID: \"8f0934bb-04b2-44c9-ad9a-55b23fc88a03\") " Jan 29 09:03:11 crc kubenswrapper[4861]: I0129 09:03:11.892845 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-utilities" (OuterVolumeSpecName: "utilities") pod "8f0934bb-04b2-44c9-ad9a-55b23fc88a03" (UID: "8f0934bb-04b2-44c9-ad9a-55b23fc88a03"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:03:11 crc kubenswrapper[4861]: I0129 09:03:11.897551 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-kube-api-access-26t89" (OuterVolumeSpecName: "kube-api-access-26t89") pod "8f0934bb-04b2-44c9-ad9a-55b23fc88a03" (UID: "8f0934bb-04b2-44c9-ad9a-55b23fc88a03"). InnerVolumeSpecName "kube-api-access-26t89". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:03:11 crc kubenswrapper[4861]: I0129 09:03:11.923278 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8f0934bb-04b2-44c9-ad9a-55b23fc88a03" (UID: "8f0934bb-04b2-44c9-ad9a-55b23fc88a03"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:03:11 crc kubenswrapper[4861]: I0129 09:03:11.995202 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 09:03:11 crc kubenswrapper[4861]: I0129 09:03:11.995428 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26t89\" (UniqueName: \"kubernetes.io/projected/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-kube-api-access-26t89\") on node \"crc\" DevicePath \"\"" Jan 29 09:03:11 crc kubenswrapper[4861]: I0129 09:03:11.995507 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f0934bb-04b2-44c9-ad9a-55b23fc88a03-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.320420 4861 generic.go:334] "Generic (PLEG): container finished" podID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" containerID="24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b" exitCode=0 Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.320473 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntwz2" event={"ID":"8f0934bb-04b2-44c9-ad9a-55b23fc88a03","Type":"ContainerDied","Data":"24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b"} Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.320502 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntwz2" event={"ID":"8f0934bb-04b2-44c9-ad9a-55b23fc88a03","Type":"ContainerDied","Data":"6f6deb8983805ae6988f308aa67dd7bd57ab1a97cb342af61e0116af81504328"} Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.320519 4861 scope.go:117] "RemoveContainer" containerID="24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b" Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.320545 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ntwz2" Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.346459 4861 scope.go:117] "RemoveContainer" containerID="5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1" Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.356174 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ntwz2"] Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.367817 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ntwz2"] Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.369970 4861 scope.go:117] "RemoveContainer" containerID="6ea2743c7b360e500e268ccc4f0260ab0ccf1f8f8d3f616ae4ce9af342842052" Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.422352 4861 scope.go:117] "RemoveContainer" containerID="24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b" Jan 29 09:03:12 crc kubenswrapper[4861]: E0129 09:03:12.422828 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b\": container with ID starting with 24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b not found: ID does not exist" containerID="24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b" Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.422876 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b"} err="failed to get container status \"24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b\": rpc error: code = NotFound desc = could not find container \"24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b\": container with ID starting with 24671034b0738fce4e8922b586bf277ceb3c8dfcd77cc636c7b9053cb949d68b not found: ID does not exist" Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.422939 4861 scope.go:117] "RemoveContainer" containerID="5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1" Jan 29 09:03:12 crc kubenswrapper[4861]: E0129 09:03:12.423328 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1\": container with ID starting with 5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1 not found: ID does not exist" containerID="5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1" Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.423438 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1"} err="failed to get container status \"5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1\": rpc error: code = NotFound desc = could not find container \"5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1\": container with ID starting with 5b04b5bc446fccb22c8e1e1c4fdb6311620e0812a8d53210d313f7be6cc620b1 not found: ID does not exist" Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.423511 4861 scope.go:117] "RemoveContainer" containerID="6ea2743c7b360e500e268ccc4f0260ab0ccf1f8f8d3f616ae4ce9af342842052" Jan 29 09:03:12 crc kubenswrapper[4861]: E0129 09:03:12.423826 4861 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6ea2743c7b360e500e268ccc4f0260ab0ccf1f8f8d3f616ae4ce9af342842052\": container with ID starting with 6ea2743c7b360e500e268ccc4f0260ab0ccf1f8f8d3f616ae4ce9af342842052 not found: ID does not exist" containerID="6ea2743c7b360e500e268ccc4f0260ab0ccf1f8f8d3f616ae4ce9af342842052" Jan 29 09:03:12 crc kubenswrapper[4861]: I0129 09:03:12.423858 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ea2743c7b360e500e268ccc4f0260ab0ccf1f8f8d3f616ae4ce9af342842052"} err="failed to get container status \"6ea2743c7b360e500e268ccc4f0260ab0ccf1f8f8d3f616ae4ce9af342842052\": rpc error: code = NotFound desc = could not find container \"6ea2743c7b360e500e268ccc4f0260ab0ccf1f8f8d3f616ae4ce9af342842052\": container with ID starting with 6ea2743c7b360e500e268ccc4f0260ab0ccf1f8f8d3f616ae4ce9af342842052 not found: ID does not exist" Jan 29 09:03:13 crc kubenswrapper[4861]: I0129 09:03:13.128378 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" path="/var/lib/kubelet/pods/8f0934bb-04b2-44c9-ad9a-55b23fc88a03/volumes" Jan 29 09:03:30 crc kubenswrapper[4861]: I0129 09:03:30.630423 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 09:03:30 crc kubenswrapper[4861]: I0129 09:03:30.631039 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.236267 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-59th4/must-gather-k4sl8"] Jan 29 09:03:58 crc kubenswrapper[4861]: E0129 09:03:58.238569 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" containerName="extract-utilities" Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.238673 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" containerName="extract-utilities" Jan 29 09:03:58 crc kubenswrapper[4861]: E0129 09:03:58.238765 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" containerName="extract-content" Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.238840 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" containerName="extract-content" Jan 29 09:03:58 crc kubenswrapper[4861]: E0129 09:03:58.238957 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" containerName="registry-server" Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.239034 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" containerName="registry-server" Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.239439 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f0934bb-04b2-44c9-ad9a-55b23fc88a03" containerName="registry-server" Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 
Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.243646 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-59th4"/"kube-root-ca.crt"
Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.244165 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-59th4"/"openshift-service-ca.crt"
Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.244962 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-59th4"/"default-dockercfg-tj9w5"
Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.306854 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-59th4/must-gather-k4sl8"]
Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.375757 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5blt6\" (UniqueName: \"kubernetes.io/projected/e8494e0d-125e-476c-964c-98d08119fccf-kube-api-access-5blt6\") pod \"must-gather-k4sl8\" (UID: \"e8494e0d-125e-476c-964c-98d08119fccf\") " pod="openshift-must-gather-59th4/must-gather-k4sl8"
Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.376048 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e8494e0d-125e-476c-964c-98d08119fccf-must-gather-output\") pod \"must-gather-k4sl8\" (UID: \"e8494e0d-125e-476c-964c-98d08119fccf\") " pod="openshift-must-gather-59th4/must-gather-k4sl8"
Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.478055 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5blt6\" (UniqueName: \"kubernetes.io/projected/e8494e0d-125e-476c-964c-98d08119fccf-kube-api-access-5blt6\") pod \"must-gather-k4sl8\" (UID: \"e8494e0d-125e-476c-964c-98d08119fccf\") " pod="openshift-must-gather-59th4/must-gather-k4sl8"
Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.478122 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e8494e0d-125e-476c-964c-98d08119fccf-must-gather-output\") pod \"must-gather-k4sl8\" (UID: \"e8494e0d-125e-476c-964c-98d08119fccf\") " pod="openshift-must-gather-59th4/must-gather-k4sl8"
Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.478619 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e8494e0d-125e-476c-964c-98d08119fccf-must-gather-output\") pod \"must-gather-k4sl8\" (UID: \"e8494e0d-125e-476c-964c-98d08119fccf\") " pod="openshift-must-gather-59th4/must-gather-k4sl8"
Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.496488 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5blt6\" (UniqueName: \"kubernetes.io/projected/e8494e0d-125e-476c-964c-98d08119fccf-kube-api-access-5blt6\") pod \"must-gather-k4sl8\" (UID: \"e8494e0d-125e-476c-964c-98d08119fccf\") " pod="openshift-must-gather-59th4/must-gather-k4sl8"
Jan 29 09:03:58 crc kubenswrapper[4861]: I0129 09:03:58.568178 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-59th4/must-gather-k4sl8"
Jan 29 09:03:59 crc kubenswrapper[4861]: I0129 09:03:59.098592 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-59th4/must-gather-k4sl8"]
Jan 29 09:03:59 crc kubenswrapper[4861]: I0129 09:03:59.760424 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-59th4/must-gather-k4sl8" event={"ID":"e8494e0d-125e-476c-964c-98d08119fccf","Type":"ContainerStarted","Data":"eda51fa30627e6d3fcee11d85b24a7c2431be7fba72d8a5d0776d837c3cfb2ee"}
Jan 29 09:04:00 crc kubenswrapper[4861]: I0129 09:04:00.629460 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 09:04:00 crc kubenswrapper[4861]: I0129 09:04:00.629796 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 09:04:07 crc kubenswrapper[4861]: I0129 09:04:07.847549 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-59th4/must-gather-k4sl8" event={"ID":"e8494e0d-125e-476c-964c-98d08119fccf","Type":"ContainerStarted","Data":"5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609"}
Jan 29 09:04:07 crc kubenswrapper[4861]: I0129 09:04:07.848067 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-59th4/must-gather-k4sl8" event={"ID":"e8494e0d-125e-476c-964c-98d08119fccf","Type":"ContainerStarted","Data":"de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427"}
Jan 29 09:04:07 crc kubenswrapper[4861]: I0129 09:04:07.866128 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-59th4/must-gather-k4sl8" podStartSLOduration=2.0243785499999998 podStartE2EDuration="9.866109337s" podCreationTimestamp="2026-01-29 09:03:58 +0000 UTC" firstStartedPulling="2026-01-29 09:03:59.10478887 +0000 UTC m=+8930.776283427" lastFinishedPulling="2026-01-29 09:04:06.946519657 +0000 UTC m=+8938.618014214" observedRunningTime="2026-01-29 09:04:07.865503781 +0000 UTC m=+8939.536998338" watchObservedRunningTime="2026-01-29 09:04:07.866109337 +0000 UTC m=+8939.537603894"
Jan 29 09:04:11 crc kubenswrapper[4861]: I0129 09:04:11.045103 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-59th4/crc-debug-92zm9"]
Jan 29 09:04:11 crc kubenswrapper[4861]: I0129 09:04:11.046888 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-92zm9"
Jan 29 09:04:11 crc kubenswrapper[4861]: I0129 09:04:11.185236 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/728d101d-a8a9-43d4-b8de-d4e38a346e1c-host\") pod \"crc-debug-92zm9\" (UID: \"728d101d-a8a9-43d4-b8de-d4e38a346e1c\") " pod="openshift-must-gather-59th4/crc-debug-92zm9"
Jan 29 09:04:11 crc kubenswrapper[4861]: I0129 09:04:11.185386 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6mn9\" (UniqueName: \"kubernetes.io/projected/728d101d-a8a9-43d4-b8de-d4e38a346e1c-kube-api-access-j6mn9\") pod \"crc-debug-92zm9\" (UID: \"728d101d-a8a9-43d4-b8de-d4e38a346e1c\") " pod="openshift-must-gather-59th4/crc-debug-92zm9"
Jan 29 09:04:11 crc kubenswrapper[4861]: I0129 09:04:11.287443 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6mn9\" (UniqueName: \"kubernetes.io/projected/728d101d-a8a9-43d4-b8de-d4e38a346e1c-kube-api-access-j6mn9\") pod \"crc-debug-92zm9\" (UID: \"728d101d-a8a9-43d4-b8de-d4e38a346e1c\") " pod="openshift-must-gather-59th4/crc-debug-92zm9"
Jan 29 09:04:11 crc kubenswrapper[4861]: I0129 09:04:11.287610 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/728d101d-a8a9-43d4-b8de-d4e38a346e1c-host\") pod \"crc-debug-92zm9\" (UID: \"728d101d-a8a9-43d4-b8de-d4e38a346e1c\") " pod="openshift-must-gather-59th4/crc-debug-92zm9"
Jan 29 09:04:11 crc kubenswrapper[4861]: I0129 09:04:11.287716 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/728d101d-a8a9-43d4-b8de-d4e38a346e1c-host\") pod \"crc-debug-92zm9\" (UID: \"728d101d-a8a9-43d4-b8de-d4e38a346e1c\") " pod="openshift-must-gather-59th4/crc-debug-92zm9"
Jan 29 09:04:11 crc kubenswrapper[4861]: I0129 09:04:11.307975 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6mn9\" (UniqueName: \"kubernetes.io/projected/728d101d-a8a9-43d4-b8de-d4e38a346e1c-kube-api-access-j6mn9\") pod \"crc-debug-92zm9\" (UID: \"728d101d-a8a9-43d4-b8de-d4e38a346e1c\") " pod="openshift-must-gather-59th4/crc-debug-92zm9"
Jan 29 09:04:11 crc kubenswrapper[4861]: I0129 09:04:11.365559 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-92zm9"
Jan 29 09:04:11 crc kubenswrapper[4861]: W0129 09:04:11.417151 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod728d101d_a8a9_43d4_b8de_d4e38a346e1c.slice/crio-1af73a5c00c1c063039233e1f10c9d255aa4fe6c4224825c9d75318e7e228d31 WatchSource:0}: Error finding container 1af73a5c00c1c063039233e1f10c9d255aa4fe6c4224825c9d75318e7e228d31: Status 404 returned error can't find the container with id 1af73a5c00c1c063039233e1f10c9d255aa4fe6c4224825c9d75318e7e228d31
Jan 29 09:04:11 crc kubenswrapper[4861]: I0129 09:04:11.900274 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-59th4/crc-debug-92zm9" event={"ID":"728d101d-a8a9-43d4-b8de-d4e38a346e1c","Type":"ContainerStarted","Data":"1af73a5c00c1c063039233e1f10c9d255aa4fe6c4224825c9d75318e7e228d31"}
Jan 29 09:04:25 crc kubenswrapper[4861]: I0129 09:04:25.034762 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-59th4/crc-debug-92zm9" event={"ID":"728d101d-a8a9-43d4-b8de-d4e38a346e1c","Type":"ContainerStarted","Data":"bff605b224919f87472d80df2b416afa655a3f01a0d77a3f9c7778320e452c5c"}
Jan 29 09:04:25 crc kubenswrapper[4861]: I0129 09:04:25.061231 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-59th4/crc-debug-92zm9" podStartSLOduration=1.391609897 podStartE2EDuration="14.061209702s" podCreationTimestamp="2026-01-29 09:04:11 +0000 UTC" firstStartedPulling="2026-01-29 09:04:11.419770803 +0000 UTC m=+8943.091265360" lastFinishedPulling="2026-01-29 09:04:24.089370608 +0000 UTC m=+8955.760865165" observedRunningTime="2026-01-29 09:04:25.050284713 +0000 UTC m=+8956.721779270" watchObservedRunningTime="2026-01-29 09:04:25.061209702 +0000 UTC m=+8956.732704279"
Jan 29 09:04:30 crc kubenswrapper[4861]: I0129 09:04:30.630225 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 09:04:30 crc kubenswrapper[4861]: I0129 09:04:30.630910 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 09:04:30 crc kubenswrapper[4861]: I0129 09:04:30.630962 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 09:04:30 crc kubenswrapper[4861]: I0129 09:04:30.631862 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"61395d172ec3f374d64d044677c1415cf92c79889300b56f1ecd0883f2f5ce33"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 09:04:30 crc kubenswrapper[4861]: I0129 09:04:30.631919 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://61395d172ec3f374d64d044677c1415cf92c79889300b56f1ecd0883f2f5ce33" gracePeriod=600
containerName="machine-config-daemon" containerID="cri-o://61395d172ec3f374d64d044677c1415cf92c79889300b56f1ecd0883f2f5ce33" gracePeriod=600 Jan 29 09:04:31 crc kubenswrapper[4861]: I0129 09:04:31.099752 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="61395d172ec3f374d64d044677c1415cf92c79889300b56f1ecd0883f2f5ce33" exitCode=0 Jan 29 09:04:31 crc kubenswrapper[4861]: I0129 09:04:31.099795 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"61395d172ec3f374d64d044677c1415cf92c79889300b56f1ecd0883f2f5ce33"} Jan 29 09:04:31 crc kubenswrapper[4861]: I0129 09:04:31.100050 4861 scope.go:117] "RemoveContainer" containerID="b19f498e8e5fb52444ac49ed4d12316e7da99b0e34033a1a11fb067ac5886f8c" Jan 29 09:04:32 crc kubenswrapper[4861]: I0129 09:04:32.111141 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59"} Jan 29 09:05:13 crc kubenswrapper[4861]: I0129 09:05:13.522666 4861 generic.go:334] "Generic (PLEG): container finished" podID="728d101d-a8a9-43d4-b8de-d4e38a346e1c" containerID="bff605b224919f87472d80df2b416afa655a3f01a0d77a3f9c7778320e452c5c" exitCode=0 Jan 29 09:05:13 crc kubenswrapper[4861]: I0129 09:05:13.522755 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-59th4/crc-debug-92zm9" event={"ID":"728d101d-a8a9-43d4-b8de-d4e38a346e1c","Type":"ContainerDied","Data":"bff605b224919f87472d80df2b416afa655a3f01a0d77a3f9c7778320e452c5c"} Jan 29 09:05:14 crc kubenswrapper[4861]: I0129 09:05:14.658263 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-92zm9" Jan 29 09:05:14 crc kubenswrapper[4861]: I0129 09:05:14.697443 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-59th4/crc-debug-92zm9"] Jan 29 09:05:14 crc kubenswrapper[4861]: I0129 09:05:14.706498 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-59th4/crc-debug-92zm9"] Jan 29 09:05:14 crc kubenswrapper[4861]: I0129 09:05:14.741398 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/728d101d-a8a9-43d4-b8de-d4e38a346e1c-host\") pod \"728d101d-a8a9-43d4-b8de-d4e38a346e1c\" (UID: \"728d101d-a8a9-43d4-b8de-d4e38a346e1c\") " Jan 29 09:05:14 crc kubenswrapper[4861]: I0129 09:05:14.741488 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6mn9\" (UniqueName: \"kubernetes.io/projected/728d101d-a8a9-43d4-b8de-d4e38a346e1c-kube-api-access-j6mn9\") pod \"728d101d-a8a9-43d4-b8de-d4e38a346e1c\" (UID: \"728d101d-a8a9-43d4-b8de-d4e38a346e1c\") " Jan 29 09:05:14 crc kubenswrapper[4861]: I0129 09:05:14.741543 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/728d101d-a8a9-43d4-b8de-d4e38a346e1c-host" (OuterVolumeSpecName: "host") pod "728d101d-a8a9-43d4-b8de-d4e38a346e1c" (UID: "728d101d-a8a9-43d4-b8de-d4e38a346e1c"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 09:05:14 crc kubenswrapper[4861]: I0129 09:05:14.741953 4861 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/728d101d-a8a9-43d4-b8de-d4e38a346e1c-host\") on node \"crc\" DevicePath \"\"" Jan 29 09:05:14 crc kubenswrapper[4861]: I0129 09:05:14.754469 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/728d101d-a8a9-43d4-b8de-d4e38a346e1c-kube-api-access-j6mn9" (OuterVolumeSpecName: "kube-api-access-j6mn9") pod "728d101d-a8a9-43d4-b8de-d4e38a346e1c" (UID: "728d101d-a8a9-43d4-b8de-d4e38a346e1c"). InnerVolumeSpecName "kube-api-access-j6mn9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:05:14 crc kubenswrapper[4861]: I0129 09:05:14.844063 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6mn9\" (UniqueName: \"kubernetes.io/projected/728d101d-a8a9-43d4-b8de-d4e38a346e1c-kube-api-access-j6mn9\") on node \"crc\" DevicePath \"\"" Jan 29 09:05:15 crc kubenswrapper[4861]: I0129 09:05:15.129013 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="728d101d-a8a9-43d4-b8de-d4e38a346e1c" path="/var/lib/kubelet/pods/728d101d-a8a9-43d4-b8de-d4e38a346e1c/volumes" Jan 29 09:05:15 crc kubenswrapper[4861]: I0129 09:05:15.543041 4861 scope.go:117] "RemoveContainer" containerID="bff605b224919f87472d80df2b416afa655a3f01a0d77a3f9c7778320e452c5c" Jan 29 09:05:15 crc kubenswrapper[4861]: I0129 09:05:15.543123 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-92zm9" Jan 29 09:05:15 crc kubenswrapper[4861]: I0129 09:05:15.902093 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-59th4/crc-debug-w2v9s"] Jan 29 09:05:15 crc kubenswrapper[4861]: E0129 09:05:15.902634 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="728d101d-a8a9-43d4-b8de-d4e38a346e1c" containerName="container-00" Jan 29 09:05:15 crc kubenswrapper[4861]: I0129 09:05:15.902652 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="728d101d-a8a9-43d4-b8de-d4e38a346e1c" containerName="container-00" Jan 29 09:05:15 crc kubenswrapper[4861]: I0129 09:05:15.902937 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="728d101d-a8a9-43d4-b8de-d4e38a346e1c" containerName="container-00" Jan 29 09:05:15 crc kubenswrapper[4861]: I0129 09:05:15.903909 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-w2v9s" Jan 29 09:05:15 crc kubenswrapper[4861]: I0129 09:05:15.973947 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whwrn\" (UniqueName: \"kubernetes.io/projected/11b67dc8-7af7-4a75-852d-9ec0913e46d6-kube-api-access-whwrn\") pod \"crc-debug-w2v9s\" (UID: \"11b67dc8-7af7-4a75-852d-9ec0913e46d6\") " pod="openshift-must-gather-59th4/crc-debug-w2v9s" Jan 29 09:05:15 crc kubenswrapper[4861]: I0129 09:05:15.974027 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/11b67dc8-7af7-4a75-852d-9ec0913e46d6-host\") pod \"crc-debug-w2v9s\" (UID: \"11b67dc8-7af7-4a75-852d-9ec0913e46d6\") " pod="openshift-must-gather-59th4/crc-debug-w2v9s" Jan 29 09:05:16 crc kubenswrapper[4861]: I0129 09:05:16.075640 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whwrn\" (UniqueName: \"kubernetes.io/projected/11b67dc8-7af7-4a75-852d-9ec0913e46d6-kube-api-access-whwrn\") pod \"crc-debug-w2v9s\" (UID: \"11b67dc8-7af7-4a75-852d-9ec0913e46d6\") " pod="openshift-must-gather-59th4/crc-debug-w2v9s" Jan 29 09:05:16 crc kubenswrapper[4861]: I0129 09:05:16.075722 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/11b67dc8-7af7-4a75-852d-9ec0913e46d6-host\") pod \"crc-debug-w2v9s\" (UID: \"11b67dc8-7af7-4a75-852d-9ec0913e46d6\") " pod="openshift-must-gather-59th4/crc-debug-w2v9s" Jan 29 09:05:16 crc kubenswrapper[4861]: I0129 09:05:16.075885 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/11b67dc8-7af7-4a75-852d-9ec0913e46d6-host\") pod \"crc-debug-w2v9s\" (UID: \"11b67dc8-7af7-4a75-852d-9ec0913e46d6\") " pod="openshift-must-gather-59th4/crc-debug-w2v9s" Jan 29 09:05:16 crc kubenswrapper[4861]: I0129 09:05:16.095536 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whwrn\" (UniqueName: \"kubernetes.io/projected/11b67dc8-7af7-4a75-852d-9ec0913e46d6-kube-api-access-whwrn\") pod \"crc-debug-w2v9s\" (UID: \"11b67dc8-7af7-4a75-852d-9ec0913e46d6\") " pod="openshift-must-gather-59th4/crc-debug-w2v9s" Jan 29 09:05:16 crc kubenswrapper[4861]: I0129 09:05:16.220960 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-w2v9s" Jan 29 09:05:16 crc kubenswrapper[4861]: I0129 09:05:16.585422 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-59th4/crc-debug-w2v9s" event={"ID":"11b67dc8-7af7-4a75-852d-9ec0913e46d6","Type":"ContainerStarted","Data":"1446d6ef4f5daafba47a4c0977fd77be1e48da96a7ca8cd025c01ca94843e4b1"} Jan 29 09:05:17 crc kubenswrapper[4861]: I0129 09:05:17.595281 4861 generic.go:334] "Generic (PLEG): container finished" podID="11b67dc8-7af7-4a75-852d-9ec0913e46d6" containerID="721502295a9104acbba117beaf23498887541e2ee6b25bfab3407670f2eaf8df" exitCode=0 Jan 29 09:05:17 crc kubenswrapper[4861]: I0129 09:05:17.595337 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-59th4/crc-debug-w2v9s" event={"ID":"11b67dc8-7af7-4a75-852d-9ec0913e46d6","Type":"ContainerDied","Data":"721502295a9104acbba117beaf23498887541e2ee6b25bfab3407670f2eaf8df"} Jan 29 09:05:18 crc kubenswrapper[4861]: I0129 09:05:18.390869 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-59th4/crc-debug-w2v9s"] Jan 29 09:05:18 crc kubenswrapper[4861]: I0129 09:05:18.400806 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-59th4/crc-debug-w2v9s"] Jan 29 09:05:18 crc kubenswrapper[4861]: I0129 09:05:18.723988 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-w2v9s" Jan 29 09:05:18 crc kubenswrapper[4861]: I0129 09:05:18.841177 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/11b67dc8-7af7-4a75-852d-9ec0913e46d6-host\") pod \"11b67dc8-7af7-4a75-852d-9ec0913e46d6\" (UID: \"11b67dc8-7af7-4a75-852d-9ec0913e46d6\") " Jan 29 09:05:18 crc kubenswrapper[4861]: I0129 09:05:18.841273 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whwrn\" (UniqueName: \"kubernetes.io/projected/11b67dc8-7af7-4a75-852d-9ec0913e46d6-kube-api-access-whwrn\") pod \"11b67dc8-7af7-4a75-852d-9ec0913e46d6\" (UID: \"11b67dc8-7af7-4a75-852d-9ec0913e46d6\") " Jan 29 09:05:18 crc kubenswrapper[4861]: I0129 09:05:18.842642 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/11b67dc8-7af7-4a75-852d-9ec0913e46d6-host" (OuterVolumeSpecName: "host") pod "11b67dc8-7af7-4a75-852d-9ec0913e46d6" (UID: "11b67dc8-7af7-4a75-852d-9ec0913e46d6"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 09:05:18 crc kubenswrapper[4861]: I0129 09:05:18.847394 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11b67dc8-7af7-4a75-852d-9ec0913e46d6-kube-api-access-whwrn" (OuterVolumeSpecName: "kube-api-access-whwrn") pod "11b67dc8-7af7-4a75-852d-9ec0913e46d6" (UID: "11b67dc8-7af7-4a75-852d-9ec0913e46d6"). InnerVolumeSpecName "kube-api-access-whwrn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:05:18 crc kubenswrapper[4861]: I0129 09:05:18.943399 4861 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/11b67dc8-7af7-4a75-852d-9ec0913e46d6-host\") on node \"crc\" DevicePath \"\"" Jan 29 09:05:18 crc kubenswrapper[4861]: I0129 09:05:18.944220 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whwrn\" (UniqueName: \"kubernetes.io/projected/11b67dc8-7af7-4a75-852d-9ec0913e46d6-kube-api-access-whwrn\") on node \"crc\" DevicePath \"\"" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.130666 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11b67dc8-7af7-4a75-852d-9ec0913e46d6" path="/var/lib/kubelet/pods/11b67dc8-7af7-4a75-852d-9ec0913e46d6/volumes" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.581698 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-59th4/crc-debug-pngfs"] Jan 29 09:05:19 crc kubenswrapper[4861]: E0129 09:05:19.582337 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11b67dc8-7af7-4a75-852d-9ec0913e46d6" containerName="container-00" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.582358 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="11b67dc8-7af7-4a75-852d-9ec0913e46d6" containerName="container-00" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.582605 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="11b67dc8-7af7-4a75-852d-9ec0913e46d6" containerName="container-00" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.583595 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-pngfs" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.622152 4861 scope.go:117] "RemoveContainer" containerID="721502295a9104acbba117beaf23498887541e2ee6b25bfab3407670f2eaf8df" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.622218 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-w2v9s" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.667156 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmqtf\" (UniqueName: \"kubernetes.io/projected/d08979fe-f148-452f-98e4-bad92ef64031-kube-api-access-cmqtf\") pod \"crc-debug-pngfs\" (UID: \"d08979fe-f148-452f-98e4-bad92ef64031\") " pod="openshift-must-gather-59th4/crc-debug-pngfs" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.667267 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d08979fe-f148-452f-98e4-bad92ef64031-host\") pod \"crc-debug-pngfs\" (UID: \"d08979fe-f148-452f-98e4-bad92ef64031\") " pod="openshift-must-gather-59th4/crc-debug-pngfs" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.769479 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d08979fe-f148-452f-98e4-bad92ef64031-host\") pod \"crc-debug-pngfs\" (UID: \"d08979fe-f148-452f-98e4-bad92ef64031\") " pod="openshift-must-gather-59th4/crc-debug-pngfs" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.769623 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d08979fe-f148-452f-98e4-bad92ef64031-host\") pod \"crc-debug-pngfs\" (UID: \"d08979fe-f148-452f-98e4-bad92ef64031\") " pod="openshift-must-gather-59th4/crc-debug-pngfs" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.769790 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmqtf\" (UniqueName: \"kubernetes.io/projected/d08979fe-f148-452f-98e4-bad92ef64031-kube-api-access-cmqtf\") pod \"crc-debug-pngfs\" (UID: \"d08979fe-f148-452f-98e4-bad92ef64031\") " pod="openshift-must-gather-59th4/crc-debug-pngfs" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.793184 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmqtf\" (UniqueName: \"kubernetes.io/projected/d08979fe-f148-452f-98e4-bad92ef64031-kube-api-access-cmqtf\") pod \"crc-debug-pngfs\" (UID: \"d08979fe-f148-452f-98e4-bad92ef64031\") " pod="openshift-must-gather-59th4/crc-debug-pngfs" Jan 29 09:05:19 crc kubenswrapper[4861]: I0129 09:05:19.906541 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-pngfs" Jan 29 09:05:19 crc kubenswrapper[4861]: W0129 09:05:19.933737 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd08979fe_f148_452f_98e4_bad92ef64031.slice/crio-ffabae569498fb1aff0e86d5de9180d3fb88fba24d69c12657c15e5dd38b73c4 WatchSource:0}: Error finding container ffabae569498fb1aff0e86d5de9180d3fb88fba24d69c12657c15e5dd38b73c4: Status 404 returned error can't find the container with id ffabae569498fb1aff0e86d5de9180d3fb88fba24d69c12657c15e5dd38b73c4 Jan 29 09:05:20 crc kubenswrapper[4861]: I0129 09:05:20.631747 4861 generic.go:334] "Generic (PLEG): container finished" podID="d08979fe-f148-452f-98e4-bad92ef64031" containerID="b559676a6f1b3c116775ce5758296229115500e678e718530c5bf116d11ec9cb" exitCode=0 Jan 29 09:05:20 crc kubenswrapper[4861]: I0129 09:05:20.631836 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-59th4/crc-debug-pngfs" event={"ID":"d08979fe-f148-452f-98e4-bad92ef64031","Type":"ContainerDied","Data":"b559676a6f1b3c116775ce5758296229115500e678e718530c5bf116d11ec9cb"} Jan 29 09:05:20 crc kubenswrapper[4861]: I0129 09:05:20.632114 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-59th4/crc-debug-pngfs" event={"ID":"d08979fe-f148-452f-98e4-bad92ef64031","Type":"ContainerStarted","Data":"ffabae569498fb1aff0e86d5de9180d3fb88fba24d69c12657c15e5dd38b73c4"} Jan 29 09:05:20 crc kubenswrapper[4861]: I0129 09:05:20.667096 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-59th4/crc-debug-pngfs"] Jan 29 09:05:20 crc kubenswrapper[4861]: I0129 09:05:20.676462 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-59th4/crc-debug-pngfs"] Jan 29 09:05:21 crc kubenswrapper[4861]: I0129 09:05:21.757438 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-pngfs" Jan 29 09:05:21 crc kubenswrapper[4861]: I0129 09:05:21.814452 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmqtf\" (UniqueName: \"kubernetes.io/projected/d08979fe-f148-452f-98e4-bad92ef64031-kube-api-access-cmqtf\") pod \"d08979fe-f148-452f-98e4-bad92ef64031\" (UID: \"d08979fe-f148-452f-98e4-bad92ef64031\") " Jan 29 09:05:21 crc kubenswrapper[4861]: I0129 09:05:21.815166 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d08979fe-f148-452f-98e4-bad92ef64031-host\") pod \"d08979fe-f148-452f-98e4-bad92ef64031\" (UID: \"d08979fe-f148-452f-98e4-bad92ef64031\") " Jan 29 09:05:21 crc kubenswrapper[4861]: I0129 09:05:21.815300 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d08979fe-f148-452f-98e4-bad92ef64031-host" (OuterVolumeSpecName: "host") pod "d08979fe-f148-452f-98e4-bad92ef64031" (UID: "d08979fe-f148-452f-98e4-bad92ef64031"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 09:05:21 crc kubenswrapper[4861]: I0129 09:05:21.815732 4861 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d08979fe-f148-452f-98e4-bad92ef64031-host\") on node \"crc\" DevicePath \"\"" Jan 29 09:05:21 crc kubenswrapper[4861]: I0129 09:05:21.822540 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d08979fe-f148-452f-98e4-bad92ef64031-kube-api-access-cmqtf" (OuterVolumeSpecName: "kube-api-access-cmqtf") pod "d08979fe-f148-452f-98e4-bad92ef64031" (UID: "d08979fe-f148-452f-98e4-bad92ef64031"). InnerVolumeSpecName "kube-api-access-cmqtf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:05:21 crc kubenswrapper[4861]: I0129 09:05:21.918171 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmqtf\" (UniqueName: \"kubernetes.io/projected/d08979fe-f148-452f-98e4-bad92ef64031-kube-api-access-cmqtf\") on node \"crc\" DevicePath \"\"" Jan 29 09:05:22 crc kubenswrapper[4861]: I0129 09:05:22.654268 4861 scope.go:117] "RemoveContainer" containerID="b559676a6f1b3c116775ce5758296229115500e678e718530c5bf116d11ec9cb" Jan 29 09:05:22 crc kubenswrapper[4861]: I0129 09:05:22.654600 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-59th4/crc-debug-pngfs" Jan 29 09:05:23 crc kubenswrapper[4861]: I0129 09:05:23.128382 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d08979fe-f148-452f-98e4-bad92ef64031" path="/var/lib/kubelet/pods/d08979fe-f148-452f-98e4-bad92ef64031/volumes" Jan 29 09:07:00 crc kubenswrapper[4861]: I0129 09:07:00.630387 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 09:07:00 crc kubenswrapper[4861]: I0129 09:07:00.631198 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 09:07:30 crc kubenswrapper[4861]: I0129 09:07:30.629866 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 09:07:30 crc kubenswrapper[4861]: I0129 09:07:30.630405 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 09:08:00 crc kubenswrapper[4861]: I0129 09:08:00.629688 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 09:08:00 crc 
kubenswrapper[4861]: I0129 09:08:00.630239 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 09:08:00 crc kubenswrapper[4861]: I0129 09:08:00.630291 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" Jan 29 09:08:00 crc kubenswrapper[4861]: I0129 09:08:00.631095 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 09:08:00 crc kubenswrapper[4861]: I0129 09:08:00.631148 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" gracePeriod=600 Jan 29 09:08:00 crc kubenswrapper[4861]: E0129 09:08:00.749813 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:08:01 crc kubenswrapper[4861]: I0129 09:08:01.192987 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" exitCode=0 Jan 29 09:08:01 crc kubenswrapper[4861]: I0129 09:08:01.193023 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59"} Jan 29 09:08:01 crc kubenswrapper[4861]: I0129 09:08:01.193085 4861 scope.go:117] "RemoveContainer" containerID="61395d172ec3f374d64d044677c1415cf92c79889300b56f1ecd0883f2f5ce33" Jan 29 09:08:01 crc kubenswrapper[4861]: I0129 09:08:01.193937 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:08:01 crc kubenswrapper[4861]: E0129 09:08:01.194489 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:08:15 crc kubenswrapper[4861]: I0129 09:08:15.118526 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:08:15 crc 
kubenswrapper[4861]: E0129 09:08:15.119787 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:08:22 crc kubenswrapper[4861]: I0129 09:08:22.036343 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-578c88c7d6-cxzrb" podUID="7a3b8613-fa03-4ee0-90f0-bc02abb0e72b" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Jan 29 09:08:29 crc kubenswrapper[4861]: I0129 09:08:29.126798 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:08:29 crc kubenswrapper[4861]: E0129 09:08:29.127695 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:08:44 crc kubenswrapper[4861]: I0129 09:08:44.117026 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:08:44 crc kubenswrapper[4861]: E0129 09:08:44.117896 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:08:59 crc kubenswrapper[4861]: I0129 09:08:59.124647 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:08:59 crc kubenswrapper[4861]: E0129 09:08:59.125351 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:09:13 crc kubenswrapper[4861]: I0129 09:09:13.116921 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:09:13 crc kubenswrapper[4861]: E0129 09:09:13.117883 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:09:28 crc kubenswrapper[4861]: I0129 09:09:28.117135 4861 scope.go:117] 
"RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:09:28 crc kubenswrapper[4861]: E0129 09:09:28.118023 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:09:39 crc kubenswrapper[4861]: I0129 09:09:39.123828 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:09:39 crc kubenswrapper[4861]: E0129 09:09:39.124643 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:09:53 crc kubenswrapper[4861]: I0129 09:09:53.117315 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:09:53 crc kubenswrapper[4861]: E0129 09:09:53.117985 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:10:07 crc kubenswrapper[4861]: I0129 09:10:07.117927 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:10:07 crc kubenswrapper[4861]: E0129 09:10:07.118752 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:10:15 crc kubenswrapper[4861]: I0129 09:10:15.825159 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sr5h6"] Jan 29 09:10:15 crc kubenswrapper[4861]: E0129 09:10:15.826138 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d08979fe-f148-452f-98e4-bad92ef64031" containerName="container-00" Jan 29 09:10:15 crc kubenswrapper[4861]: I0129 09:10:15.826151 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d08979fe-f148-452f-98e4-bad92ef64031" containerName="container-00" Jan 29 09:10:15 crc kubenswrapper[4861]: I0129 09:10:15.826357 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d08979fe-f148-452f-98e4-bad92ef64031" containerName="container-00" Jan 29 09:10:15 crc kubenswrapper[4861]: I0129 09:10:15.827832 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:15 crc kubenswrapper[4861]: I0129 09:10:15.850193 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sr5h6"] Jan 29 09:10:15 crc kubenswrapper[4861]: I0129 09:10:15.934034 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gsj6\" (UniqueName: \"kubernetes.io/projected/5daea67e-e5ae-4824-9c29-c648a981402f-kube-api-access-7gsj6\") pod \"community-operators-sr5h6\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:15 crc kubenswrapper[4861]: I0129 09:10:15.934720 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-utilities\") pod \"community-operators-sr5h6\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:15 crc kubenswrapper[4861]: I0129 09:10:15.934977 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-catalog-content\") pod \"community-operators-sr5h6\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:16 crc kubenswrapper[4861]: I0129 09:10:16.037961 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gsj6\" (UniqueName: \"kubernetes.io/projected/5daea67e-e5ae-4824-9c29-c648a981402f-kube-api-access-7gsj6\") pod \"community-operators-sr5h6\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:16 crc kubenswrapper[4861]: I0129 09:10:16.038238 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-utilities\") pod \"community-operators-sr5h6\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:16 crc kubenswrapper[4861]: I0129 09:10:16.038354 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-catalog-content\") pod \"community-operators-sr5h6\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:16 crc kubenswrapper[4861]: I0129 09:10:16.039292 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-utilities\") pod \"community-operators-sr5h6\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:16 crc kubenswrapper[4861]: I0129 09:10:16.039339 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-catalog-content\") pod \"community-operators-sr5h6\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:16 crc kubenswrapper[4861]: I0129 09:10:16.061693 4861 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7gsj6\" (UniqueName: \"kubernetes.io/projected/5daea67e-e5ae-4824-9c29-c648a981402f-kube-api-access-7gsj6\") pod \"community-operators-sr5h6\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:16 crc kubenswrapper[4861]: I0129 09:10:16.154709 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:17 crc kubenswrapper[4861]: I0129 09:10:17.295880 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sr5h6"] Jan 29 09:10:17 crc kubenswrapper[4861]: I0129 09:10:17.629261 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr5h6" event={"ID":"5daea67e-e5ae-4824-9c29-c648a981402f","Type":"ContainerStarted","Data":"73c92a54ea8eb6bb52986c557964193341b4cabe0885b7baafead3092a1aaf2e"} Jan 29 09:10:18 crc kubenswrapper[4861]: I0129 09:10:18.642518 4861 generic.go:334] "Generic (PLEG): container finished" podID="5daea67e-e5ae-4824-9c29-c648a981402f" containerID="811aa96f09085f8ce67572b5038493d8c052c782d4709b1fbb03d5992bae6253" exitCode=0 Jan 29 09:10:18 crc kubenswrapper[4861]: I0129 09:10:18.642576 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr5h6" event={"ID":"5daea67e-e5ae-4824-9c29-c648a981402f","Type":"ContainerDied","Data":"811aa96f09085f8ce67572b5038493d8c052c782d4709b1fbb03d5992bae6253"} Jan 29 09:10:18 crc kubenswrapper[4861]: I0129 09:10:18.645225 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 09:10:19 crc kubenswrapper[4861]: I0129 09:10:19.116926 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:10:19 crc kubenswrapper[4861]: E0129 09:10:19.117652 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:10:20 crc kubenswrapper[4861]: I0129 09:10:20.669270 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr5h6" event={"ID":"5daea67e-e5ae-4824-9c29-c648a981402f","Type":"ContainerStarted","Data":"51f3e5b9d8cedeb60f815405a48be12bd0e9fc33c29b4d61c1585d71f2c6458e"} Jan 29 09:10:23 crc kubenswrapper[4861]: I0129 09:10:23.700557 4861 generic.go:334] "Generic (PLEG): container finished" podID="5daea67e-e5ae-4824-9c29-c648a981402f" containerID="51f3e5b9d8cedeb60f815405a48be12bd0e9fc33c29b4d61c1585d71f2c6458e" exitCode=0 Jan 29 09:10:23 crc kubenswrapper[4861]: I0129 09:10:23.700645 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr5h6" event={"ID":"5daea67e-e5ae-4824-9c29-c648a981402f","Type":"ContainerDied","Data":"51f3e5b9d8cedeb60f815405a48be12bd0e9fc33c29b4d61c1585d71f2c6458e"} Jan 29 09:10:25 crc kubenswrapper[4861]: I0129 09:10:25.722040 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr5h6" 
event={"ID":"5daea67e-e5ae-4824-9c29-c648a981402f","Type":"ContainerStarted","Data":"c75d0b621bee712a2a98aac30703247cca191f7ed5234085339fa5b693c1d8b9"} Jan 29 09:10:25 crc kubenswrapper[4861]: I0129 09:10:25.743557 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sr5h6" podStartSLOduration=4.911790902 podStartE2EDuration="10.743530019s" podCreationTimestamp="2026-01-29 09:10:15 +0000 UTC" firstStartedPulling="2026-01-29 09:10:18.644849123 +0000 UTC m=+9310.316343680" lastFinishedPulling="2026-01-29 09:10:24.47658824 +0000 UTC m=+9316.148082797" observedRunningTime="2026-01-29 09:10:25.73715989 +0000 UTC m=+9317.408654467" watchObservedRunningTime="2026-01-29 09:10:25.743530019 +0000 UTC m=+9317.415024576" Jan 29 09:10:26 crc kubenswrapper[4861]: I0129 09:10:26.157186 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:26 crc kubenswrapper[4861]: I0129 09:10:26.157497 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:27 crc kubenswrapper[4861]: I0129 09:10:27.204681 4861 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-sr5h6" podUID="5daea67e-e5ae-4824-9c29-c648a981402f" containerName="registry-server" probeResult="failure" output=< Jan 29 09:10:27 crc kubenswrapper[4861]: timeout: failed to connect service ":50051" within 1s Jan 29 09:10:27 crc kubenswrapper[4861]: > Jan 29 09:10:34 crc kubenswrapper[4861]: I0129 09:10:34.116766 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:10:34 crc kubenswrapper[4861]: E0129 09:10:34.117795 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:10:36 crc kubenswrapper[4861]: I0129 09:10:36.207588 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:36 crc kubenswrapper[4861]: I0129 09:10:36.264877 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:36 crc kubenswrapper[4861]: I0129 09:10:36.449636 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sr5h6"] Jan 29 09:10:37 crc kubenswrapper[4861]: I0129 09:10:37.845798 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sr5h6" podUID="5daea67e-e5ae-4824-9c29-c648a981402f" containerName="registry-server" containerID="cri-o://c75d0b621bee712a2a98aac30703247cca191f7ed5234085339fa5b693c1d8b9" gracePeriod=2 Jan 29 09:10:38 crc kubenswrapper[4861]: I0129 09:10:38.859708 4861 generic.go:334] "Generic (PLEG): container finished" podID="5daea67e-e5ae-4824-9c29-c648a981402f" containerID="c75d0b621bee712a2a98aac30703247cca191f7ed5234085339fa5b693c1d8b9" exitCode=0 Jan 29 09:10:38 crc kubenswrapper[4861]: I0129 09:10:38.859788 4861 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/community-operators-sr5h6" event={"ID":"5daea67e-e5ae-4824-9c29-c648a981402f","Type":"ContainerDied","Data":"c75d0b621bee712a2a98aac30703247cca191f7ed5234085339fa5b693c1d8b9"} Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.420630 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.461219 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-utilities\") pod \"5daea67e-e5ae-4824-9c29-c648a981402f\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.461561 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-catalog-content\") pod \"5daea67e-e5ae-4824-9c29-c648a981402f\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.461769 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gsj6\" (UniqueName: \"kubernetes.io/projected/5daea67e-e5ae-4824-9c29-c648a981402f-kube-api-access-7gsj6\") pod \"5daea67e-e5ae-4824-9c29-c648a981402f\" (UID: \"5daea67e-e5ae-4824-9c29-c648a981402f\") " Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.462261 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-utilities" (OuterVolumeSpecName: "utilities") pod "5daea67e-e5ae-4824-9c29-c648a981402f" (UID: "5daea67e-e5ae-4824-9c29-c648a981402f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.463406 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.470231 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5daea67e-e5ae-4824-9c29-c648a981402f-kube-api-access-7gsj6" (OuterVolumeSpecName: "kube-api-access-7gsj6") pod "5daea67e-e5ae-4824-9c29-c648a981402f" (UID: "5daea67e-e5ae-4824-9c29-c648a981402f"). InnerVolumeSpecName "kube-api-access-7gsj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.530308 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5daea67e-e5ae-4824-9c29-c648a981402f" (UID: "5daea67e-e5ae-4824-9c29-c648a981402f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.565485 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5daea67e-e5ae-4824-9c29-c648a981402f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.565557 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gsj6\" (UniqueName: \"kubernetes.io/projected/5daea67e-e5ae-4824-9c29-c648a981402f-kube-api-access-7gsj6\") on node \"crc\" DevicePath \"\"" Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.874672 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sr5h6" event={"ID":"5daea67e-e5ae-4824-9c29-c648a981402f","Type":"ContainerDied","Data":"73c92a54ea8eb6bb52986c557964193341b4cabe0885b7baafead3092a1aaf2e"} Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.874723 4861 scope.go:117] "RemoveContainer" containerID="c75d0b621bee712a2a98aac30703247cca191f7ed5234085339fa5b693c1d8b9" Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.874729 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sr5h6" Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.914857 4861 scope.go:117] "RemoveContainer" containerID="51f3e5b9d8cedeb60f815405a48be12bd0e9fc33c29b4d61c1585d71f2c6458e" Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.929429 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sr5h6"] Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.939916 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sr5h6"] Jan 29 09:10:39 crc kubenswrapper[4861]: I0129 09:10:39.940260 4861 scope.go:117] "RemoveContainer" containerID="811aa96f09085f8ce67572b5038493d8c052c782d4709b1fbb03d5992bae6253" Jan 29 09:10:41 crc kubenswrapper[4861]: I0129 09:10:41.128340 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5daea67e-e5ae-4824-9c29-c648a981402f" path="/var/lib/kubelet/pods/5daea67e-e5ae-4824-9c29-c648a981402f/volumes" Jan 29 09:10:48 crc kubenswrapper[4861]: I0129 09:10:48.116707 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:10:48 crc kubenswrapper[4861]: E0129 09:10:48.117414 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:11:00 crc kubenswrapper[4861]: I0129 09:11:00.116663 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:11:00 crc kubenswrapper[4861]: E0129 09:11:00.117544 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:11:11 crc kubenswrapper[4861]: I0129 09:11:11.117378 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:11:11 crc kubenswrapper[4861]: E0129 09:11:11.118194 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:11:26 crc kubenswrapper[4861]: I0129 09:11:26.116666 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:11:26 crc kubenswrapper[4861]: E0129 09:11:26.117439 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:11:37 crc kubenswrapper[4861]: I0129 09:11:37.116891 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:11:37 crc kubenswrapper[4861]: E0129 09:11:37.117838 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:11:51 crc kubenswrapper[4861]: I0129 09:11:51.117511 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:11:51 crc kubenswrapper[4861]: E0129 09:11:51.118259 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:12:03 crc kubenswrapper[4861]: I0129 09:12:03.117310 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:12:03 crc kubenswrapper[4861]: E0129 09:12:03.118146 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:12:15 crc kubenswrapper[4861]: I0129 09:12:15.116827 4861 
scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:12:15 crc kubenswrapper[4861]: E0129 09:12:15.117644 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:12:28 crc kubenswrapper[4861]: I0129 09:12:28.116929 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:12:28 crc kubenswrapper[4861]: E0129 09:12:28.117775 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:12:34 crc kubenswrapper[4861]: I0129 09:12:34.990912 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bmw92"] Jan 29 09:12:34 crc kubenswrapper[4861]: E0129 09:12:34.993172 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5daea67e-e5ae-4824-9c29-c648a981402f" containerName="registry-server" Jan 29 09:12:34 crc kubenswrapper[4861]: I0129 09:12:34.993189 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5daea67e-e5ae-4824-9c29-c648a981402f" containerName="registry-server" Jan 29 09:12:34 crc kubenswrapper[4861]: E0129 09:12:34.993212 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5daea67e-e5ae-4824-9c29-c648a981402f" containerName="extract-utilities" Jan 29 09:12:34 crc kubenswrapper[4861]: I0129 09:12:34.993220 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5daea67e-e5ae-4824-9c29-c648a981402f" containerName="extract-utilities" Jan 29 09:12:34 crc kubenswrapper[4861]: E0129 09:12:34.993248 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5daea67e-e5ae-4824-9c29-c648a981402f" containerName="extract-content" Jan 29 09:12:34 crc kubenswrapper[4861]: I0129 09:12:34.993256 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="5daea67e-e5ae-4824-9c29-c648a981402f" containerName="extract-content" Jan 29 09:12:34 crc kubenswrapper[4861]: I0129 09:12:34.993508 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="5daea67e-e5ae-4824-9c29-c648a981402f" containerName="registry-server" Jan 29 09:12:34 crc kubenswrapper[4861]: I0129 09:12:34.995518 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:35 crc kubenswrapper[4861]: I0129 09:12:35.014603 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bmw92"] Jan 29 09:12:35 crc kubenswrapper[4861]: I0129 09:12:35.059522 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-utilities\") pod \"redhat-operators-bmw92\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:35 crc kubenswrapper[4861]: I0129 09:12:35.059743 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-catalog-content\") pod \"redhat-operators-bmw92\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:35 crc kubenswrapper[4861]: I0129 09:12:35.060272 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shw72\" (UniqueName: \"kubernetes.io/projected/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-kube-api-access-shw72\") pod \"redhat-operators-bmw92\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:35 crc kubenswrapper[4861]: I0129 09:12:35.163688 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shw72\" (UniqueName: \"kubernetes.io/projected/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-kube-api-access-shw72\") pod \"redhat-operators-bmw92\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:35 crc kubenswrapper[4861]: I0129 09:12:35.163918 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-utilities\") pod \"redhat-operators-bmw92\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:35 crc kubenswrapper[4861]: I0129 09:12:35.164116 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-catalog-content\") pod \"redhat-operators-bmw92\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:35 crc kubenswrapper[4861]: I0129 09:12:35.164556 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-utilities\") pod \"redhat-operators-bmw92\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:35 crc kubenswrapper[4861]: I0129 09:12:35.164690 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-catalog-content\") pod \"redhat-operators-bmw92\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:35 crc kubenswrapper[4861]: I0129 09:12:35.782496 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-shw72\" (UniqueName: \"kubernetes.io/projected/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-kube-api-access-shw72\") pod \"redhat-operators-bmw92\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:35 crc kubenswrapper[4861]: I0129 09:12:35.922014 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:36 crc kubenswrapper[4861]: I0129 09:12:36.464051 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bmw92"] Jan 29 09:12:37 crc kubenswrapper[4861]: I0129 09:12:37.051007 4861 generic.go:334] "Generic (PLEG): container finished" podID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" containerID="2f6e6da6dfda7199ded2c97cfd1ab9efb02a615c7e51e9e28d5120b8c2754a1b" exitCode=0 Jan 29 09:12:37 crc kubenswrapper[4861]: I0129 09:12:37.051061 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bmw92" event={"ID":"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6","Type":"ContainerDied","Data":"2f6e6da6dfda7199ded2c97cfd1ab9efb02a615c7e51e9e28d5120b8c2754a1b"} Jan 29 09:12:37 crc kubenswrapper[4861]: I0129 09:12:37.051358 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bmw92" event={"ID":"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6","Type":"ContainerStarted","Data":"c47df51c491e0900bd3e3fc6fa7a13dd72874a8b9011aae4173e534c7bd2074b"} Jan 29 09:12:39 crc kubenswrapper[4861]: I0129 09:12:39.079013 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bmw92" event={"ID":"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6","Type":"ContainerStarted","Data":"850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83"} Jan 29 09:12:40 crc kubenswrapper[4861]: I0129 09:12:40.117434 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:12:40 crc kubenswrapper[4861]: E0129 09:12:40.118038 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:12:47 crc kubenswrapper[4861]: I0129 09:12:47.156682 4861 generic.go:334] "Generic (PLEG): container finished" podID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" containerID="850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83" exitCode=0 Jan 29 09:12:47 crc kubenswrapper[4861]: I0129 09:12:47.156764 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bmw92" event={"ID":"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6","Type":"ContainerDied","Data":"850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83"} Jan 29 09:12:49 crc kubenswrapper[4861]: I0129 09:12:49.179543 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bmw92" event={"ID":"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6","Type":"ContainerStarted","Data":"b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a"} Jan 29 09:12:49 crc kubenswrapper[4861]: I0129 09:12:49.211689 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-bmw92" podStartSLOduration=3.781291654 podStartE2EDuration="15.211665934s" podCreationTimestamp="2026-01-29 09:12:34 +0000 UTC" firstStartedPulling="2026-01-29 09:12:37.053837029 +0000 UTC m=+9448.725331586" lastFinishedPulling="2026-01-29 09:12:48.484211309 +0000 UTC m=+9460.155705866" observedRunningTime="2026-01-29 09:12:49.201287237 +0000 UTC m=+9460.872781794" watchObservedRunningTime="2026-01-29 09:12:49.211665934 +0000 UTC m=+9460.883160491" Jan 29 09:12:51 crc kubenswrapper[4861]: I0129 09:12:51.116861 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:12:51 crc kubenswrapper[4861]: E0129 09:12:51.117574 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:12:55 crc kubenswrapper[4861]: I0129 09:12:55.922692 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:55 crc kubenswrapper[4861]: I0129 09:12:55.923256 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:56 crc kubenswrapper[4861]: I0129 09:12:56.133879 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:56 crc kubenswrapper[4861]: I0129 09:12:56.341296 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:56 crc kubenswrapper[4861]: I0129 09:12:56.437854 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bmw92"] Jan 29 09:12:58 crc kubenswrapper[4861]: I0129 09:12:58.268527 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bmw92" podUID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" containerName="registry-server" containerID="cri-o://b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a" gracePeriod=2 Jan 29 09:12:58 crc kubenswrapper[4861]: I0129 09:12:58.739264 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:58 crc kubenswrapper[4861]: I0129 09:12:58.850514 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-catalog-content\") pod \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " Jan 29 09:12:58 crc kubenswrapper[4861]: I0129 09:12:58.850619 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shw72\" (UniqueName: \"kubernetes.io/projected/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-kube-api-access-shw72\") pod \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " Jan 29 09:12:58 crc kubenswrapper[4861]: I0129 09:12:58.850681 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-utilities\") pod \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\" (UID: \"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6\") " Jan 29 09:12:58 crc kubenswrapper[4861]: I0129 09:12:58.851409 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-utilities" (OuterVolumeSpecName: "utilities") pod "36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" (UID: "36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:12:58 crc kubenswrapper[4861]: I0129 09:12:58.860039 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-kube-api-access-shw72" (OuterVolumeSpecName: "kube-api-access-shw72") pod "36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" (UID: "36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6"). InnerVolumeSpecName "kube-api-access-shw72". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:12:58 crc kubenswrapper[4861]: I0129 09:12:58.956513 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shw72\" (UniqueName: \"kubernetes.io/projected/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-kube-api-access-shw72\") on node \"crc\" DevicePath \"\"" Jan 29 09:12:58 crc kubenswrapper[4861]: I0129 09:12:58.956592 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 09:12:58 crc kubenswrapper[4861]: I0129 09:12:58.987237 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" (UID: "36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.060032 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.282152 4861 generic.go:334] "Generic (PLEG): container finished" podID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" containerID="b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a" exitCode=0 Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.282195 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bmw92" event={"ID":"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6","Type":"ContainerDied","Data":"b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a"} Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.282224 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bmw92" event={"ID":"36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6","Type":"ContainerDied","Data":"c47df51c491e0900bd3e3fc6fa7a13dd72874a8b9011aae4173e534c7bd2074b"} Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.282240 4861 scope.go:117] "RemoveContainer" containerID="b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a" Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.282393 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bmw92" Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.317211 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bmw92"] Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.320746 4861 scope.go:117] "RemoveContainer" containerID="850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83" Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.329401 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bmw92"] Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.345477 4861 scope.go:117] "RemoveContainer" containerID="2f6e6da6dfda7199ded2c97cfd1ab9efb02a615c7e51e9e28d5120b8c2754a1b" Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.393542 4861 scope.go:117] "RemoveContainer" containerID="b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a" Jan 29 09:12:59 crc kubenswrapper[4861]: E0129 09:12:59.393948 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a\": container with ID starting with b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a not found: ID does not exist" containerID="b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a" Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.394051 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a"} err="failed to get container status \"b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a\": rpc error: code = NotFound desc = could not find container \"b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a\": container with ID starting with b9cea1d33a0b9424a28bf2b5fbe4eeecac1704927e0bb6547567f6003eb3d95a not found: ID does not exist" Jan 29 09:12:59 crc 
kubenswrapper[4861]: I0129 09:12:59.394103 4861 scope.go:117] "RemoveContainer" containerID="850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83" Jan 29 09:12:59 crc kubenswrapper[4861]: E0129 09:12:59.394558 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83\": container with ID starting with 850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83 not found: ID does not exist" containerID="850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83" Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.394592 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83"} err="failed to get container status \"850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83\": rpc error: code = NotFound desc = could not find container \"850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83\": container with ID starting with 850d92d29b62aad9262b553d5015b92c6ab303a4ef94dc9f738849b36b014d83 not found: ID does not exist" Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.394613 4861 scope.go:117] "RemoveContainer" containerID="2f6e6da6dfda7199ded2c97cfd1ab9efb02a615c7e51e9e28d5120b8c2754a1b" Jan 29 09:12:59 crc kubenswrapper[4861]: E0129 09:12:59.395003 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f6e6da6dfda7199ded2c97cfd1ab9efb02a615c7e51e9e28d5120b8c2754a1b\": container with ID starting with 2f6e6da6dfda7199ded2c97cfd1ab9efb02a615c7e51e9e28d5120b8c2754a1b not found: ID does not exist" containerID="2f6e6da6dfda7199ded2c97cfd1ab9efb02a615c7e51e9e28d5120b8c2754a1b" Jan 29 09:12:59 crc kubenswrapper[4861]: I0129 09:12:59.395143 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f6e6da6dfda7199ded2c97cfd1ab9efb02a615c7e51e9e28d5120b8c2754a1b"} err="failed to get container status \"2f6e6da6dfda7199ded2c97cfd1ab9efb02a615c7e51e9e28d5120b8c2754a1b\": rpc error: code = NotFound desc = could not find container \"2f6e6da6dfda7199ded2c97cfd1ab9efb02a615c7e51e9e28d5120b8c2754a1b\": container with ID starting with 2f6e6da6dfda7199ded2c97cfd1ab9efb02a615c7e51e9e28d5120b8c2754a1b not found: ID does not exist" Jan 29 09:13:01 crc kubenswrapper[4861]: I0129 09:13:01.130145 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" path="/var/lib/kubelet/pods/36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6/volumes" Jan 29 09:13:03 crc kubenswrapper[4861]: I0129 09:13:03.117252 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59" Jan 29 09:13:04 crc kubenswrapper[4861]: I0129 09:13:04.355515 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"c6f1b77eb0a98192a07ee6373d3161e796a4f5d373da3b2d30e4e75fe52c4f99"} Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.313826 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5f8"] Jan 29 09:13:15 crc kubenswrapper[4861]: E0129 09:13:15.314803 4861 cpu_manager.go:410] "RemoveStaleState: removing container" 
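[editor's note] The ContainerStatus NotFound errors just above are benign: after "SyncLoop REMOVE" the containers are already gone from CRI-O, so the follow-up status lookup for each ID fails with gRPC NotFound and the deletor merely logs it. Treating NotFound as "already deleted" is the standard idempotent-cleanup pattern, sketched here with the gRPC status package; the removeContainer stub is hypothetical, not the CRI client:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer is a hypothetical stand-in for a CRI RemoveContainer
// call; here it always reports the container as missing, like the log.
func removeContainer(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

// cleanup treats NotFound as success: the desired end state ("container
// gone") already holds, which is why the kubelet only logs these errors
// instead of retrying.
func cleanup(id string) error {
	if err := removeContainer(id); status.Code(err) != codes.NotFound {
		return err
	}
	return nil // already removed; idempotent delete
}

func main() {
	if err := cleanup("b9cea1d33a0b"); err != nil {
		fmt.Println("DeleteContainer returned error:", err)
	} else {
		fmt.Println("container already gone; nothing to do")
	}
}
```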
podUID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" containerName="extract-content" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.314820 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" containerName="extract-content" Jan 29 09:13:15 crc kubenswrapper[4861]: E0129 09:13:15.314852 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" containerName="extract-utilities" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.314858 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" containerName="extract-utilities" Jan 29 09:13:15 crc kubenswrapper[4861]: E0129 09:13:15.314870 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" containerName="registry-server" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.314875 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" containerName="registry-server" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.315111 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="36aebb81-8e9a-4926-b3fb-ac6e3b11b8a6" containerName="registry-server" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.316715 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.327092 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5f8"] Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.370885 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-catalog-content\") pod \"redhat-marketplace-nj5f8\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.371100 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-utilities\") pod \"redhat-marketplace-nj5f8\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.371393 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lddsf\" (UniqueName: \"kubernetes.io/projected/a4fb441d-4394-469f-8381-16ae9f6bb072-kube-api-access-lddsf\") pod \"redhat-marketplace-nj5f8\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.473914 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lddsf\" (UniqueName: \"kubernetes.io/projected/a4fb441d-4394-469f-8381-16ae9f6bb072-kube-api-access-lddsf\") pod \"redhat-marketplace-nj5f8\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.474193 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-catalog-content\") pod 
\"redhat-marketplace-nj5f8\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.474291 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-utilities\") pod \"redhat-marketplace-nj5f8\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.474996 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-utilities\") pod \"redhat-marketplace-nj5f8\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.475187 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-catalog-content\") pod \"redhat-marketplace-nj5f8\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.497622 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lddsf\" (UniqueName: \"kubernetes.io/projected/a4fb441d-4394-469f-8381-16ae9f6bb072-kube-api-access-lddsf\") pod \"redhat-marketplace-nj5f8\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:15 crc kubenswrapper[4861]: I0129 09:13:15.636702 4861 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:16 crc kubenswrapper[4861]: I0129 09:13:16.168680 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5f8"] Jan 29 09:13:16 crc kubenswrapper[4861]: I0129 09:13:16.525868 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5f8" event={"ID":"a4fb441d-4394-469f-8381-16ae9f6bb072","Type":"ContainerStarted","Data":"1864a1492d5f58b81320905c1c4661b570c2b8d08e64a9866a31b2329946b8ab"} Jan 29 09:13:17 crc kubenswrapper[4861]: I0129 09:13:17.538682 4861 generic.go:334] "Generic (PLEG): container finished" podID="a4fb441d-4394-469f-8381-16ae9f6bb072" containerID="a7b99d650545441311b6d8234362544ad91fd9ede468e46cb40e70e70e43cb4b" exitCode=0 Jan 29 09:13:17 crc kubenswrapper[4861]: I0129 09:13:17.538792 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5f8" event={"ID":"a4fb441d-4394-469f-8381-16ae9f6bb072","Type":"ContainerDied","Data":"a7b99d650545441311b6d8234362544ad91fd9ede468e46cb40e70e70e43cb4b"} Jan 29 09:13:19 crc kubenswrapper[4861]: I0129 09:13:19.560096 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5f8" event={"ID":"a4fb441d-4394-469f-8381-16ae9f6bb072","Type":"ContainerStarted","Data":"058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75"} Jan 29 09:13:20 crc kubenswrapper[4861]: I0129 09:13:20.572732 4861 generic.go:334] "Generic (PLEG): container finished" podID="a4fb441d-4394-469f-8381-16ae9f6bb072" containerID="058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75" exitCode=0 Jan 29 09:13:20 crc kubenswrapper[4861]: I0129 09:13:20.572874 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5f8" event={"ID":"a4fb441d-4394-469f-8381-16ae9f6bb072","Type":"ContainerDied","Data":"058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75"} Jan 29 09:13:21 crc kubenswrapper[4861]: I0129 09:13:21.585259 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5f8" event={"ID":"a4fb441d-4394-469f-8381-16ae9f6bb072","Type":"ContainerStarted","Data":"984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2"} Jan 29 09:13:21 crc kubenswrapper[4861]: I0129 09:13:21.613944 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nj5f8" podStartSLOduration=2.9875769549999998 podStartE2EDuration="6.613924361s" podCreationTimestamp="2026-01-29 09:13:15 +0000 UTC" firstStartedPulling="2026-01-29 09:13:17.5403998 +0000 UTC m=+9489.211894357" lastFinishedPulling="2026-01-29 09:13:21.166747206 +0000 UTC m=+9492.838241763" observedRunningTime="2026-01-29 09:13:21.60266185 +0000 UTC m=+9493.274156427" watchObservedRunningTime="2026-01-29 09:13:21.613924361 +0000 UTC m=+9493.285418908" Jan 29 09:13:25 crc kubenswrapper[4861]: I0129 09:13:25.637817 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:25 crc kubenswrapper[4861]: I0129 09:13:25.638706 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:25 crc kubenswrapper[4861]: I0129 09:13:25.690268 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:26 crc kubenswrapper[4861]: I0129 09:13:26.956355 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:27 crc kubenswrapper[4861]: I0129 09:13:27.009095 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5f8"] Jan 29 09:13:28 crc kubenswrapper[4861]: I0129 09:13:28.660562 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nj5f8" podUID="a4fb441d-4394-469f-8381-16ae9f6bb072" containerName="registry-server" containerID="cri-o://984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2" gracePeriod=2 Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.282386 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.417331 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-catalog-content\") pod \"a4fb441d-4394-469f-8381-16ae9f6bb072\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.417921 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-utilities\") pod \"a4fb441d-4394-469f-8381-16ae9f6bb072\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.417983 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lddsf\" (UniqueName: \"kubernetes.io/projected/a4fb441d-4394-469f-8381-16ae9f6bb072-kube-api-access-lddsf\") pod \"a4fb441d-4394-469f-8381-16ae9f6bb072\" (UID: \"a4fb441d-4394-469f-8381-16ae9f6bb072\") " Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.418875 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-utilities" (OuterVolumeSpecName: "utilities") pod "a4fb441d-4394-469f-8381-16ae9f6bb072" (UID: "a4fb441d-4394-469f-8381-16ae9f6bb072"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.425573 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4fb441d-4394-469f-8381-16ae9f6bb072-kube-api-access-lddsf" (OuterVolumeSpecName: "kube-api-access-lddsf") pod "a4fb441d-4394-469f-8381-16ae9f6bb072" (UID: "a4fb441d-4394-469f-8381-16ae9f6bb072"). InnerVolumeSpecName "kube-api-access-lddsf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.446784 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a4fb441d-4394-469f-8381-16ae9f6bb072" (UID: "a4fb441d-4394-469f-8381-16ae9f6bb072"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.521733 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.521804 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lddsf\" (UniqueName: \"kubernetes.io/projected/a4fb441d-4394-469f-8381-16ae9f6bb072-kube-api-access-lddsf\") on node \"crc\" DevicePath \"\"" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.521827 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4fb441d-4394-469f-8381-16ae9f6bb072-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.673358 4861 generic.go:334] "Generic (PLEG): container finished" podID="a4fb441d-4394-469f-8381-16ae9f6bb072" containerID="984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2" exitCode=0 Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.673413 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5f8" event={"ID":"a4fb441d-4394-469f-8381-16ae9f6bb072","Type":"ContainerDied","Data":"984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2"} Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.673487 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5f8" event={"ID":"a4fb441d-4394-469f-8381-16ae9f6bb072","Type":"ContainerDied","Data":"1864a1492d5f58b81320905c1c4661b570c2b8d08e64a9866a31b2329946b8ab"} Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.673506 4861 scope.go:117] "RemoveContainer" containerID="984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.673565 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nj5f8" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.711499 4861 scope.go:117] "RemoveContainer" containerID="058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.723522 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5f8"] Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.734041 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5f8"] Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.741801 4861 scope.go:117] "RemoveContainer" containerID="a7b99d650545441311b6d8234362544ad91fd9ede468e46cb40e70e70e43cb4b" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.802554 4861 scope.go:117] "RemoveContainer" containerID="984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2" Jan 29 09:13:29 crc kubenswrapper[4861]: E0129 09:13:29.803110 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2\": container with ID starting with 984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2 not found: ID does not exist" containerID="984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.803147 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2"} err="failed to get container status \"984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2\": rpc error: code = NotFound desc = could not find container \"984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2\": container with ID starting with 984768fab99150a5d6f67bb5bab4c1781447b9d3aa6d752f78dfcd0b2e8568a2 not found: ID does not exist" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.803173 4861 scope.go:117] "RemoveContainer" containerID="058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75" Jan 29 09:13:29 crc kubenswrapper[4861]: E0129 09:13:29.803546 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75\": container with ID starting with 058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75 not found: ID does not exist" containerID="058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.803609 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75"} err="failed to get container status \"058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75\": rpc error: code = NotFound desc = could not find container \"058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75\": container with ID starting with 058735714a9754ad728cccc12bd3724a4ad4b3049753e50d1928207635b9ea75 not found: ID does not exist" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.803635 4861 scope.go:117] "RemoveContainer" containerID="a7b99d650545441311b6d8234362544ad91fd9ede468e46cb40e70e70e43cb4b" Jan 29 09:13:29 crc kubenswrapper[4861]: E0129 09:13:29.804061 4861 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a7b99d650545441311b6d8234362544ad91fd9ede468e46cb40e70e70e43cb4b\": container with ID starting with a7b99d650545441311b6d8234362544ad91fd9ede468e46cb40e70e70e43cb4b not found: ID does not exist" containerID="a7b99d650545441311b6d8234362544ad91fd9ede468e46cb40e70e70e43cb4b" Jan 29 09:13:29 crc kubenswrapper[4861]: I0129 09:13:29.804204 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7b99d650545441311b6d8234362544ad91fd9ede468e46cb40e70e70e43cb4b"} err="failed to get container status \"a7b99d650545441311b6d8234362544ad91fd9ede468e46cb40e70e70e43cb4b\": rpc error: code = NotFound desc = could not find container \"a7b99d650545441311b6d8234362544ad91fd9ede468e46cb40e70e70e43cb4b\": container with ID starting with a7b99d650545441311b6d8234362544ad91fd9ede468e46cb40e70e70e43cb4b not found: ID does not exist" Jan 29 09:13:31 crc kubenswrapper[4861]: I0129 09:13:31.130195 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4fb441d-4394-469f-8381-16ae9f6bb072" path="/var/lib/kubelet/pods/a4fb441d-4394-469f-8381-16ae9f6bb072/volumes" Jan 29 09:14:04 crc kubenswrapper[4861]: I0129 09:14:04.057653 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_e84fca0c-fb5a-450c-8b3a-3d378ff73299/init-config-reloader/0.log" Jan 29 09:14:04 crc kubenswrapper[4861]: I0129 09:14:04.265947 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_e84fca0c-fb5a-450c-8b3a-3d378ff73299/init-config-reloader/0.log" Jan 29 09:14:04 crc kubenswrapper[4861]: I0129 09:14:04.344056 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_e84fca0c-fb5a-450c-8b3a-3d378ff73299/config-reloader/0.log" Jan 29 09:14:04 crc kubenswrapper[4861]: I0129 09:14:04.353633 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_e84fca0c-fb5a-450c-8b3a-3d378ff73299/alertmanager/0.log" Jan 29 09:14:04 crc kubenswrapper[4861]: I0129 09:14:04.615013 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_9b56ac80-dc8c-477f-b911-c975cc701551/aodh-api/0.log" Jan 29 09:14:04 crc kubenswrapper[4861]: I0129 09:14:04.623480 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_9b56ac80-dc8c-477f-b911-c975cc701551/aodh-evaluator/0.log" Jan 29 09:14:04 crc kubenswrapper[4861]: I0129 09:14:04.767290 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_9b56ac80-dc8c-477f-b911-c975cc701551/aodh-listener/0.log" Jan 29 09:14:04 crc kubenswrapper[4861]: I0129 09:14:04.843254 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_9b56ac80-dc8c-477f-b911-c975cc701551/aodh-notifier/0.log" Jan 29 09:14:04 crc kubenswrapper[4861]: I0129 09:14:04.911120 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6d596d8b5b-rdh75_ce853f12-bafd-4fbc-90e9-ab2802a25722/barbican-api/0.log" Jan 29 09:14:05 crc kubenswrapper[4861]: I0129 09:14:05.005082 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6d596d8b5b-rdh75_ce853f12-bafd-4fbc-90e9-ab2802a25722/barbican-api-log/0.log" Jan 29 09:14:05 crc kubenswrapper[4861]: I0129 09:14:05.140592 4861 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-keystone-listener-c6c4999b6-q78x5_3b93d448-26b7-41f9-96aa-1aa8b0a6f134/barbican-keystone-listener/0.log" Jan 29 09:14:05 crc kubenswrapper[4861]: I0129 09:14:05.197629 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-c6c4999b6-q78x5_3b93d448-26b7-41f9-96aa-1aa8b0a6f134/barbican-keystone-listener-log/0.log" Jan 29 09:14:05 crc kubenswrapper[4861]: I0129 09:14:05.393149 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7749945477-kz7r5_734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171/barbican-worker/0.log" Jan 29 09:14:05 crc kubenswrapper[4861]: I0129 09:14:05.437698 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7749945477-kz7r5_734dc0b4-f5ec-47ae-a9e5-cf0b48c0a171/barbican-worker-log/0.log" Jan 29 09:14:05 crc kubenswrapper[4861]: I0129 09:14:05.818501 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-openstack-openstack-cell1-tz2gk_17876c4e-e24a-477b-86bf-aa99c0ae2803/bootstrap-openstack-openstack-cell1/0.log" Jan 29 09:14:05 crc kubenswrapper[4861]: I0129 09:14:05.917087 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f3b00fe8-8fc2-4499-8121-fc90f5be37dd/ceilometer-central-agent/0.log" Jan 29 09:14:06 crc kubenswrapper[4861]: I0129 09:14:06.043396 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f3b00fe8-8fc2-4499-8121-fc90f5be37dd/ceilometer-notification-agent/0.log" Jan 29 09:14:06 crc kubenswrapper[4861]: I0129 09:14:06.078667 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f3b00fe8-8fc2-4499-8121-fc90f5be37dd/proxy-httpd/0.log" Jan 29 09:14:06 crc kubenswrapper[4861]: I0129 09:14:06.174224 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f3b00fe8-8fc2-4499-8121-fc90f5be37dd/sg-core/0.log" Jan 29 09:14:06 crc kubenswrapper[4861]: I0129 09:14:06.362766 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1/cinder-api-log/0.log" Jan 29 09:14:06 crc kubenswrapper[4861]: I0129 09:14:06.367442 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_1e3fd03b-ef86-4ecb-a17a-a0c2555d24e1/cinder-api/0.log" Jan 29 09:14:06 crc kubenswrapper[4861]: I0129 09:14:06.560544 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_0139c78d-25f6-42a2-be1f-49a40decaaae/cinder-scheduler/0.log" Jan 29 09:14:06 crc kubenswrapper[4861]: I0129 09:14:06.652896 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_0139c78d-25f6-42a2-be1f-49a40decaaae/probe/0.log" Jan 29 09:14:06 crc kubenswrapper[4861]: I0129 09:14:06.814918 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-openstack-openstack-cell1-l5f5v_c61e4b86-43cc-4b96-85dc-74a0d969b4e5/configure-network-openstack-openstack-cell1/0.log" Jan 29 09:14:06 crc kubenswrapper[4861]: I0129 09:14:06.873176 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-5kzn6_b9c6b766-edc9-4b0b-a2d2-a54171a0570a/configure-os-openstack-openstack-cell1/0.log" Jan 29 09:14:07 crc kubenswrapper[4861]: I0129 09:14:07.114483 4861 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_dnsmasq-dns-5585ff97bc-f7zkv_ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70/init/0.log" Jan 29 09:14:07 crc kubenswrapper[4861]: I0129 09:14:07.323277 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5585ff97bc-f7zkv_ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70/init/0.log" Jan 29 09:14:07 crc kubenswrapper[4861]: I0129 09:14:07.381344 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-openstack-openstack-cell1-l99sq_4ab2f092-f6e2-4db8-b5ac-bfb83fada584/download-cache-openstack-openstack-cell1/0.log" Jan 29 09:14:07 crc kubenswrapper[4861]: I0129 09:14:07.461840 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5585ff97bc-f7zkv_ed67e6e0-58ef-42d0-8a69-a7f1b0b0da70/dnsmasq-dns/0.log" Jan 29 09:14:07 crc kubenswrapper[4861]: I0129 09:14:07.635585 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_9f7eb619-5ed2-4e32-a87a-ec61ac26bd98/glance-httpd/0.log" Jan 29 09:14:07 crc kubenswrapper[4861]: I0129 09:14:07.715244 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_9f7eb619-5ed2-4e32-a87a-ec61ac26bd98/glance-log/0.log" Jan 29 09:14:07 crc kubenswrapper[4861]: I0129 09:14:07.921239 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_1ff300e5-7a29-4ec7-974c-de163370f2f8/glance-httpd/0.log" Jan 29 09:14:08 crc kubenswrapper[4861]: I0129 09:14:08.003699 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_1ff300e5-7a29-4ec7-974c-de163370f2f8/glance-log/0.log" Jan 29 09:14:08 crc kubenswrapper[4861]: I0129 09:14:08.597058 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-bf66d5877-52r4g_c283e31a-5c70-4767-9a37-28a0778db9d3/heat-engine/0.log" Jan 29 09:14:08 crc kubenswrapper[4861]: I0129 09:14:08.621156 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-596f6c779b-zwj48_09f79ef4-29fe-49af-bc34-175306e9211a/heat-api/0.log" Jan 29 09:14:08 crc kubenswrapper[4861]: I0129 09:14:08.829269 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-668fc5bbd6-8whvv_a45c4bb9-8e82-45ac-b264-72f2325e8c3f/heat-cfnapi/0.log" Jan 29 09:14:09 crc kubenswrapper[4861]: I0129 09:14:09.068088 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-65994449bb-mb4xq_f338e1be-fc4b-4c1b-b08f-456303eef9bc/horizon/0.log" Jan 29 09:14:09 crc kubenswrapper[4861]: I0129 09:14:09.102548 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-openstack-openstack-cell1-dbqfj_394365fa-5d64-4f5e-b8f3-9d4abc0dbe07/install-certs-openstack-openstack-cell1/0.log" Jan 29 09:14:09 crc kubenswrapper[4861]: I0129 09:14:09.345630 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-65994449bb-mb4xq_f338e1be-fc4b-4c1b-b08f-456303eef9bc/horizon-log/0.log" Jan 29 09:14:09 crc kubenswrapper[4861]: I0129 09:14:09.397016 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-openstack-openstack-cell1-r2w5q_6272a2bc-68af-4bb6-9e65-ba51d3183b87/install-os-openstack-openstack-cell1/0.log" Jan 29 09:14:09 crc kubenswrapper[4861]: I0129 09:14:09.565305 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6587f5774c-8wdxx_abc77fef-ccca-488a-af89-450aa3b0836e/keystone-api/0.log" Jan 29 
09:14:09 crc kubenswrapper[4861]: I0129 09:14:09.622318 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29494621-thqv2_0a51c126-e023-43a7-91a1-4d806f2adb73/keystone-cron/0.log" Jan 29 09:14:09 crc kubenswrapper[4861]: I0129 09:14:09.914408 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_50409933-4a24-4832-afbf-bdf93efe7de7/kube-state-metrics/0.log" Jan 29 09:14:10 crc kubenswrapper[4861]: I0129 09:14:10.171710 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-openstack-openstack-cell1-x4qcm_6e815d1a-6590-4dd3-95c2-f997fd213f09/libvirt-openstack-openstack-cell1/0.log" Jan 29 09:14:10 crc kubenswrapper[4861]: I0129 09:14:10.755022 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-copy-data_7c2eaff6-8fb9-4112-bdbd-e9dedc4233ce/adoption/0.log" Jan 29 09:14:10 crc kubenswrapper[4861]: I0129 09:14:10.945670 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7dc5d457f5-tv7x9_758c49ec-0604-450b-8d71-6a01e3993cb6/neutron-api/0.log" Jan 29 09:14:11 crc kubenswrapper[4861]: I0129 09:14:11.049993 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7dc5d457f5-tv7x9_758c49ec-0604-450b-8d71-6a01e3993cb6/neutron-httpd/0.log" Jan 29 09:14:11 crc kubenswrapper[4861]: I0129 09:14:11.139985 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-dhcp-openstack-openstack-cell1-qvqjb_c90d0f7b-c4ef-4352-aef0-3f038ec6a922/neutron-dhcp-openstack-openstack-cell1/0.log" Jan 29 09:14:11 crc kubenswrapper[4861]: I0129 09:14:11.357559 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-openstack-openstack-cell1-5jc4s_64030eee-0e1c-4038-968b-421a3542fa93/neutron-metadata-openstack-openstack-cell1/0.log" Jan 29 09:14:11 crc kubenswrapper[4861]: I0129 09:14:11.516950 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-sriov-openstack-openstack-cell1-txpmz_607f0b15-73a6-4554-a410-30135f075145/neutron-sriov-openstack-openstack-cell1/0.log" Jan 29 09:14:11 crc kubenswrapper[4861]: I0129 09:14:11.924723 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_39d2b1c9-7a75-415a-90ff-cd7a10fb7a20/nova-api-log/0.log" Jan 29 09:14:11 crc kubenswrapper[4861]: I0129 09:14:11.939686 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_39d2b1c9-7a75-415a-90ff-cd7a10fb7a20/nova-api-api/0.log" Jan 29 09:14:12 crc kubenswrapper[4861]: I0129 09:14:12.084450 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_b42f984f-e0f7-4e8a-91d4-ca1443d529b1/nova-cell0-conductor-conductor/0.log" Jan 29 09:14:12 crc kubenswrapper[4861]: I0129 09:14:12.974214 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_97ba244c-c0ff-41ab-8e3b-f566ecef325d/nova-cell1-conductor-conductor/0.log" Jan 29 09:14:13 crc kubenswrapper[4861]: I0129 09:14:13.019388 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_9346b8cf-2d39-4674-a59b-0b6bb04cab33/nova-cell1-novncproxy-novncproxy/0.log" Jan 29 09:14:13 crc kubenswrapper[4861]: I0129 09:14:13.303632 4861 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell66fds_5a7fa956-9445-4483-b3c5-9d0548f8f2b4/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1/0.log" Jan 29 09:14:13 crc kubenswrapper[4861]: I0129 09:14:13.320894 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-openstack-cell1-vkvwl_ef737a6a-1c77-460c-9152-952334d3ede1/nova-cell1-openstack-openstack-cell1/0.log" Jan 29 09:14:13 crc kubenswrapper[4861]: I0129 09:14:13.677328 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_e34de8ff-fa76-4858-99f8-b5c8365adf63/nova-metadata-log/0.log" Jan 29 09:14:14 crc kubenswrapper[4861]: I0129 09:14:14.208605 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-586997dfdc-nzlgn_e5dd14c4-275b-46ce-9c6d-cbba0800e2a0/init/0.log" Jan 29 09:14:14 crc kubenswrapper[4861]: I0129 09:14:14.282902 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_ba7324f0-6a1f-402b-a403-445482d46dc4/nova-scheduler-scheduler/0.log" Jan 29 09:14:14 crc kubenswrapper[4861]: I0129 09:14:14.381379 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_e34de8ff-fa76-4858-99f8-b5c8365adf63/nova-metadata-metadata/0.log" Jan 29 09:14:14 crc kubenswrapper[4861]: I0129 09:14:14.516604 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-586997dfdc-nzlgn_e5dd14c4-275b-46ce-9c6d-cbba0800e2a0/init/0.log" Jan 29 09:14:14 crc kubenswrapper[4861]: I0129 09:14:14.538430 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-586997dfdc-nzlgn_e5dd14c4-275b-46ce-9c6d-cbba0800e2a0/octavia-api-provider-agent/0.log" Jan 29 09:14:14 crc kubenswrapper[4861]: I0129 09:14:14.743605 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-586997dfdc-nzlgn_e5dd14c4-275b-46ce-9c6d-cbba0800e2a0/octavia-api/0.log" Jan 29 09:14:14 crc kubenswrapper[4861]: I0129 09:14:14.811907 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-nrhlj_51077d9f-c090-4d63-87e3-e4289c5a672b/init/0.log" Jan 29 09:14:15 crc kubenswrapper[4861]: I0129 09:14:15.084250 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-nrhlj_51077d9f-c090-4d63-87e3-e4289c5a672b/init/0.log" Jan 29 09:14:15 crc kubenswrapper[4861]: I0129 09:14:15.124337 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-z4884_6a35cb82-3c8f-4c31-9efc-a045c24053b7/init/0.log" Jan 29 09:14:15 crc kubenswrapper[4861]: I0129 09:14:15.174362 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-nrhlj_51077d9f-c090-4d63-87e3-e4289c5a672b/octavia-healthmanager/0.log" Jan 29 09:14:15 crc kubenswrapper[4861]: I0129 09:14:15.269188 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-z4884_6a35cb82-3c8f-4c31-9efc-a045c24053b7/init/0.log" Jan 29 09:14:15 crc kubenswrapper[4861]: I0129 09:14:15.365220 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-z4884_6a35cb82-3c8f-4c31-9efc-a045c24053b7/octavia-housekeeping/0.log" Jan 29 09:14:15 crc kubenswrapper[4861]: I0129 09:14:15.454598 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-7b4hh_3f06299f-473a-4b1e-8c5c-8b6eae0a27d8/init/0.log" Jan 29 09:14:15 crc 
kubenswrapper[4861]: I0129 09:14:15.734643 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-x5hh9_e2550201-47c9-4d04-8161-24265932e7e4/init/0.log" Jan 29 09:14:15 crc kubenswrapper[4861]: I0129 09:14:15.734833 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-7b4hh_3f06299f-473a-4b1e-8c5c-8b6eae0a27d8/init/0.log" Jan 29 09:14:15 crc kubenswrapper[4861]: I0129 09:14:15.772684 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-7b4hh_3f06299f-473a-4b1e-8c5c-8b6eae0a27d8/octavia-rsyslog/0.log" Jan 29 09:14:16 crc kubenswrapper[4861]: I0129 09:14:16.141401 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3da8e0ee-9c1a-4557-a727-14de15187b68/mysql-bootstrap/0.log" Jan 29 09:14:16 crc kubenswrapper[4861]: I0129 09:14:16.159176 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-x5hh9_e2550201-47c9-4d04-8161-24265932e7e4/init/0.log" Jan 29 09:14:16 crc kubenswrapper[4861]: I0129 09:14:16.318323 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-x5hh9_e2550201-47c9-4d04-8161-24265932e7e4/octavia-worker/0.log" Jan 29 09:14:16 crc kubenswrapper[4861]: I0129 09:14:16.583134 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3da8e0ee-9c1a-4557-a727-14de15187b68/mysql-bootstrap/0.log" Jan 29 09:14:16 crc kubenswrapper[4861]: I0129 09:14:16.751548 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ca4fde45-47fb-44fa-baea-904bfec6b6e8/mysql-bootstrap/0.log" Jan 29 09:14:16 crc kubenswrapper[4861]: I0129 09:14:16.759279 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_3da8e0ee-9c1a-4557-a727-14de15187b68/galera/0.log" Jan 29 09:14:17 crc kubenswrapper[4861]: I0129 09:14:17.062446 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ca4fde45-47fb-44fa-baea-904bfec6b6e8/galera/0.log" Jan 29 09:14:17 crc kubenswrapper[4861]: I0129 09:14:17.095478 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ca4fde45-47fb-44fa-baea-904bfec6b6e8/mysql-bootstrap/0.log" Jan 29 09:14:17 crc kubenswrapper[4861]: I0129 09:14:17.128357 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_1476fc3b-74de-4991-8c45-b400d2f410f6/openstackclient/0.log" Jan 29 09:14:17 crc kubenswrapper[4861]: I0129 09:14:17.422171 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-v597l_d6dc2d4f-238b-499b-9df1-6f2875854172/openstack-network-exporter/0.log" Jan 29 09:14:17 crc kubenswrapper[4861]: I0129 09:14:17.507352 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kqmjx_0fd778e8-7702-4fc2-9af6-e06ad0631dce/ovsdb-server-init/0.log" Jan 29 09:14:17 crc kubenswrapper[4861]: I0129 09:14:17.763232 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kqmjx_0fd778e8-7702-4fc2-9af6-e06ad0631dce/ovsdb-server-init/0.log" Jan 29 09:14:17 crc kubenswrapper[4861]: I0129 09:14:17.771102 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kqmjx_0fd778e8-7702-4fc2-9af6-e06ad0631dce/ovs-vswitchd/0.log" Jan 29 09:14:17 crc kubenswrapper[4861]: I0129 09:14:17.824056 4861 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_ovn-controller-ovs-kqmjx_0fd778e8-7702-4fc2-9af6-e06ad0631dce/ovsdb-server/0.log" Jan 29 09:14:18 crc kubenswrapper[4861]: I0129 09:14:18.006571 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-s62c5_09c23e71-bf8c-49fc-a0bf-85ff216a6190/ovn-controller/0.log" Jan 29 09:14:18 crc kubenswrapper[4861]: I0129 09:14:18.425270 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-copy-data_46b0bd27-5d0b-41ee-87ec-37bc3bd687d4/adoption/0.log" Jan 29 09:14:18 crc kubenswrapper[4861]: I0129 09:14:18.589109 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d9a0af9d-96bd-42dd-9d60-0581b51b9981/openstack-network-exporter/0.log" Jan 29 09:14:18 crc kubenswrapper[4861]: I0129 09:14:18.711492 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d9a0af9d-96bd-42dd-9d60-0581b51b9981/ovn-northd/0.log" Jan 29 09:14:18 crc kubenswrapper[4861]: I0129 09:14:18.935644 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-openstack-openstack-cell1-w2rml_0ea9ddc2-bed7-4baa-9835-abfd8aefc637/ovn-openstack-openstack-cell1/0.log" Jan 29 09:14:18 crc kubenswrapper[4861]: I0129 09:14:18.998664 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_36c95ecd-713c-48cd-9a1d-3d4d7619f73a/openstack-network-exporter/0.log" Jan 29 09:14:19 crc kubenswrapper[4861]: I0129 09:14:19.111146 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_36c95ecd-713c-48cd-9a1d-3d4d7619f73a/ovsdbserver-nb/0.log" Jan 29 09:14:19 crc kubenswrapper[4861]: I0129 09:14:19.234664 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_a9e304d1-7510-4764-915b-9a9d44d43587/openstack-network-exporter/0.log" Jan 29 09:14:19 crc kubenswrapper[4861]: I0129 09:14:19.327119 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_a9e304d1-7510-4764-915b-9a9d44d43587/ovsdbserver-nb/0.log" Jan 29 09:14:19 crc kubenswrapper[4861]: I0129 09:14:19.515431 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_ac0e53a1-0e72-4889-9306-50b1b0f60ee0/openstack-network-exporter/0.log" Jan 29 09:14:19 crc kubenswrapper[4861]: I0129 09:14:19.562877 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_ac0e53a1-0e72-4889-9306-50b1b0f60ee0/ovsdbserver-nb/0.log" Jan 29 09:14:19 crc kubenswrapper[4861]: I0129 09:14:19.845346 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_015bc043-2935-42e9-8259-2a2a113e30a8/openstack-network-exporter/0.log" Jan 29 09:14:19 crc kubenswrapper[4861]: I0129 09:14:19.855095 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_015bc043-2935-42e9-8259-2a2a113e30a8/ovsdbserver-sb/0.log" Jan 29 09:14:20 crc kubenswrapper[4861]: I0129 09:14:20.022900 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe/openstack-network-exporter/0.log" Jan 29 09:14:20 crc kubenswrapper[4861]: I0129 09:14:20.154625 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_0e378828-d06c-4a89-9ca3-f134cd743d94/openstack-network-exporter/0.log" Jan 29 09:14:20 crc kubenswrapper[4861]: I0129 09:14:20.186640 4861 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-sb-1_8dd96bd8-e0b0-4c8a-98d0-2da90a7e6ebe/ovsdbserver-sb/0.log" Jan 29 09:14:20 crc kubenswrapper[4861]: I0129 09:14:20.792994 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-59dd9c5cc4-d4jm7_9ef50500-52f6-45bd-b3da-27d20b536b9e/placement-api/0.log" Jan 29 09:14:20 crc kubenswrapper[4861]: I0129 09:14:20.808203 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_0e378828-d06c-4a89-9ca3-f134cd743d94/ovsdbserver-sb/0.log" Jan 29 09:14:21 crc kubenswrapper[4861]: I0129 09:14:21.387109 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-clh8mw_0494d495-b64c-456c-8424-e929353eea07/pre-adoption-validation-openstack-pre-adoption-openstack-cell1/0.log" Jan 29 09:14:21 crc kubenswrapper[4861]: I0129 09:14:21.505895 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-59dd9c5cc4-d4jm7_9ef50500-52f6-45bd-b3da-27d20b536b9e/placement-log/0.log" Jan 29 09:14:21 crc kubenswrapper[4861]: I0129 09:14:21.637387 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_15b58b86-fb90-42d4-8818-774def6f7b1c/init-config-reloader/0.log" Jan 29 09:14:21 crc kubenswrapper[4861]: I0129 09:14:21.788384 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_15b58b86-fb90-42d4-8818-774def6f7b1c/init-config-reloader/0.log" Jan 29 09:14:21 crc kubenswrapper[4861]: I0129 09:14:21.848511 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_15b58b86-fb90-42d4-8818-774def6f7b1c/prometheus/0.log" Jan 29 09:14:21 crc kubenswrapper[4861]: I0129 09:14:21.865151 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_15b58b86-fb90-42d4-8818-774def6f7b1c/thanos-sidecar/0.log" Jan 29 09:14:21 crc kubenswrapper[4861]: I0129 09:14:21.870317 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_15b58b86-fb90-42d4-8818-774def6f7b1c/config-reloader/0.log" Jan 29 09:14:22 crc kubenswrapper[4861]: I0129 09:14:22.103570 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_511ae511-f29a-4c04-b655-b6168b8622db/setup-container/0.log" Jan 29 09:14:22 crc kubenswrapper[4861]: I0129 09:14:22.264191 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_511ae511-f29a-4c04-b655-b6168b8622db/setup-container/0.log" Jan 29 09:14:22 crc kubenswrapper[4861]: I0129 09:14:22.394220 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_511ae511-f29a-4c04-b655-b6168b8622db/rabbitmq/0.log" Jan 29 09:14:22 crc kubenswrapper[4861]: I0129 09:14:22.443580 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_36a174b6-c79e-4486-81d0-16a0ddb54e96/setup-container/0.log" Jan 29 09:14:22 crc kubenswrapper[4861]: I0129 09:14:22.471649 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_1079f778-4a50-46fe-a07a-5a7059b2865d/memcached/0.log" Jan 29 09:14:23 crc kubenswrapper[4861]: I0129 09:14:23.282247 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_36a174b6-c79e-4486-81d0-16a0ddb54e96/setup-container/0.log" Jan 29 09:14:23 crc kubenswrapper[4861]: I0129 09:14:23.586237 
4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-openstack-openstack-cell1-2xtw4_d81c0053-0659-4f0c-bb7e-e8a63e13a4d8/reboot-os-openstack-openstack-cell1/0.log" Jan 29 09:14:23 crc kubenswrapper[4861]: I0129 09:14:23.595665 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_36a174b6-c79e-4486-81d0-16a0ddb54e96/rabbitmq/0.log" Jan 29 09:14:23 crc kubenswrapper[4861]: I0129 09:14:23.775125 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-openstack-openstack-cell1-4d2vz_de505364-8d47-4ec7-ad63-9e28daabfb88/run-os-openstack-openstack-cell1/0.log" Jan 29 09:14:23 crc kubenswrapper[4861]: I0129 09:14:23.864391 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-openstack-shvjr_9a1adddb-0afb-4b8a-b08d-24a8045a6010/ssh-known-hosts-openstack/0.log" Jan 29 09:14:24 crc kubenswrapper[4861]: I0129 09:14:24.079789 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-578c88c7d6-cxzrb_7a3b8613-fa03-4ee0-90f0-bc02abb0e72b/proxy-httpd/0.log" Jan 29 09:14:24 crc kubenswrapper[4861]: I0129 09:14:24.119712 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-578c88c7d6-cxzrb_7a3b8613-fa03-4ee0-90f0-bc02abb0e72b/proxy-server/0.log" Jan 29 09:14:24 crc kubenswrapper[4861]: I0129 09:14:24.166363 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-9vcj8_84b0d825-b891-429f-b7fe-6fc86904133c/swift-ring-rebalance/0.log" Jan 29 09:14:24 crc kubenswrapper[4861]: I0129 09:14:24.383790 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-openstack-openstack-cell1-4r8cl_146988b7-5ac0-4634-a55a-0e66bb5a624e/telemetry-openstack-openstack-cell1/0.log" Jan 29 09:14:24 crc kubenswrapper[4861]: I0129 09:14:24.471532 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tripleo-cleanup-tripleo-cleanup-openstack-cell1-t7rtv_7d1929c2-3989-426c-9af1-4c54abe0ab7e/tripleo-cleanup-tripleo-cleanup-openstack-cell1/0.log" Jan 29 09:14:24 crc kubenswrapper[4861]: I0129 09:14:24.607196 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-openstack-openstack-cell1-8strq_9b1d958c-687e-4686-b8ac-87ea982bb0d9/validate-network-openstack-openstack-cell1/0.log" Jan 29 09:14:56 crc kubenswrapper[4861]: I0129 09:14:56.804596 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b6c4d8c5f-s68hj_0e2f821a-5976-405b-860f-fc5c14ca3c06/manager/0.log" Jan 29 09:14:56 crc kubenswrapper[4861]: I0129 09:14:56.842344 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-8d874c8fc-tx9q6_b708431b-9a40-4216-8ff2-e52626a78852/manager/0.log" Jan 29 09:14:56 crc kubenswrapper[4861]: I0129 09:14:56.865857 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8_ba8147a9-0ccc-4cc8-974b-b6eaa899a226/util/0.log" Jan 29 09:14:57 crc kubenswrapper[4861]: I0129 09:14:57.038343 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8_ba8147a9-0ccc-4cc8-974b-b6eaa899a226/util/0.log" Jan 29 09:14:57 crc kubenswrapper[4861]: I0129 09:14:57.089640 4861 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8_ba8147a9-0ccc-4cc8-974b-b6eaa899a226/pull/0.log" Jan 29 09:14:57 crc kubenswrapper[4861]: I0129 09:14:57.089885 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8_ba8147a9-0ccc-4cc8-974b-b6eaa899a226/pull/0.log" Jan 29 09:14:57 crc kubenswrapper[4861]: I0129 09:14:57.256158 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8_ba8147a9-0ccc-4cc8-974b-b6eaa899a226/util/0.log" Jan 29 09:14:57 crc kubenswrapper[4861]: I0129 09:14:57.295399 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8_ba8147a9-0ccc-4cc8-974b-b6eaa899a226/pull/0.log" Jan 29 09:14:57 crc kubenswrapper[4861]: I0129 09:14:57.320481 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_da00ab39b03ae6839a9374e1f0e5cf4d34a49b875de165714f9039924dtbjp8_ba8147a9-0ccc-4cc8-974b-b6eaa899a226/extract/0.log" Jan 29 09:14:57 crc kubenswrapper[4861]: I0129 09:14:57.492583 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6d9697b7f4-7gs4c_7c81b981-2cba-4cc6-a5c8-aa3d86378e3a/manager/0.log" Jan 29 09:14:57 crc kubenswrapper[4861]: I0129 09:14:57.613476 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-8886f4c47-dw8n5_6cd88ee1-a38e-43f0-9470-d7a9745665df/manager/0.log" Jan 29 09:14:57 crc kubenswrapper[4861]: I0129 09:14:57.735979 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-69d6db494d-25p9x_e3e0da3c-3983-46a4-b9fe-ef07be8ca90e/manager/0.log" Jan 29 09:14:57 crc kubenswrapper[4861]: I0129 09:14:57.815856 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5fb775575f-ps8hr_92f620b6-be39-449e-a1fc-ff64804364b5/manager/0.log" Jan 29 09:14:58 crc kubenswrapper[4861]: I0129 09:14:58.078215 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5f4b8bd54d-txmj4_c5c7a9b1-011f-435e-8a50-dd0ee333a811/manager/0.log" Jan 29 09:14:58 crc kubenswrapper[4861]: I0129 09:14:58.466303 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7dd968899f-7hqbq_444df6a8-8c8f-4f2f-8092-1dd392f6eed1/manager/0.log" Jan 29 09:14:58 crc kubenswrapper[4861]: I0129 09:14:58.528941 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-84f48565d4-v4qvf_0362a774-9d2b-491c-9a6b-96811db0d456/manager/0.log" Jan 29 09:14:58 crc kubenswrapper[4861]: I0129 09:14:58.677219 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-79955696d6-p2vh6_b145dd3d-4fa3-44ab-b4d8-879b6ebf2fa5/manager/0.log" Jan 29 09:14:58 crc kubenswrapper[4861]: I0129 09:14:58.799174 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-67bf948998-2tjv4_4ffdee81-542f-424a-b4ed-0db4b0ad2409/manager/0.log" Jan 29 09:14:58 crc kubenswrapper[4861]: I0129 09:14:58.950902 4861 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-585dbc889-nszsj_5d6f19fb-11f0-4854-8590-b97ecb2e2ab7/manager/0.log" Jan 29 09:14:59 crc kubenswrapper[4861]: I0129 09:14:59.231699 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-55bff696bd-xjbh9_4c9a63c6-aa31-43cc-8b09-34a877dc2957/manager/0.log" Jan 29 09:14:59 crc kubenswrapper[4861]: I0129 09:14:59.245863 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6687f8d877-7cw4w_e2b67e50-9400-44b6-b1ba-b6185f758932/manager/0.log" Jan 29 09:14:59 crc kubenswrapper[4861]: I0129 09:14:59.392512 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-d5d667db8-qgbcw_9f3536a4-1e54-4d3a-97fe-5afe4e4e3b48/manager/0.log" Jan 29 09:14:59 crc kubenswrapper[4861]: I0129 09:14:59.565841 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5c4cd4c8c8-hcfcx_9ea19e58-6067-48c1-993c-b1d0cced8997/operator/0.log" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.001205 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-mb56g_2929ade7-fa9f-46a1-9810-0263ea016347/registry-server/0.log" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.059493 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-788c46999f-p8vt7_eede0800-ba92-4df3-9115-6b97f05620da/manager/0.log" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.132475 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b964cf4cd-2blwh_45beb56d-51b6-4737-b0f7-cd7db1d86942/manager/0.log" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.181839 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n"] Jan 29 09:15:00 crc kubenswrapper[4861]: E0129 09:15:00.182430 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4fb441d-4394-469f-8381-16ae9f6bb072" containerName="registry-server" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.182453 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4fb441d-4394-469f-8381-16ae9f6bb072" containerName="registry-server" Jan 29 09:15:00 crc kubenswrapper[4861]: E0129 09:15:00.182474 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4fb441d-4394-469f-8381-16ae9f6bb072" containerName="extract-content" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.182482 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4fb441d-4394-469f-8381-16ae9f6bb072" containerName="extract-content" Jan 29 09:15:00 crc kubenswrapper[4861]: E0129 09:15:00.182500 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4fb441d-4394-469f-8381-16ae9f6bb072" containerName="extract-utilities" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.182508 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4fb441d-4394-469f-8381-16ae9f6bb072" containerName="extract-utilities" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.182842 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4fb441d-4394-469f-8381-16ae9f6bb072" containerName="registry-server" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 
09:15:00.183831 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.191102 4861 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.191742 4861 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.192389 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdddp\" (UniqueName: \"kubernetes.io/projected/2d9bc30a-467a-4e2e-98e3-d66104565f5f-kube-api-access-bdddp\") pod \"collect-profiles-29494635-vjn5n\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.192583 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2d9bc30a-467a-4e2e-98e3-d66104565f5f-secret-volume\") pod \"collect-profiles-29494635-vjn5n\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.192670 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2d9bc30a-467a-4e2e-98e3-d66104565f5f-config-volume\") pod \"collect-profiles-29494635-vjn5n\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.216635 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n"] Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.296663 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2d9bc30a-467a-4e2e-98e3-d66104565f5f-config-volume\") pod \"collect-profiles-29494635-vjn5n\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.296757 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdddp\" (UniqueName: \"kubernetes.io/projected/2d9bc30a-467a-4e2e-98e3-d66104565f5f-kube-api-access-bdddp\") pod \"collect-profiles-29494635-vjn5n\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.297027 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2d9bc30a-467a-4e2e-98e3-d66104565f5f-secret-volume\") pod \"collect-profiles-29494635-vjn5n\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.301954 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/2d9bc30a-467a-4e2e-98e3-d66104565f5f-config-volume\") pod \"collect-profiles-29494635-vjn5n\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.308313 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2d9bc30a-467a-4e2e-98e3-d66104565f5f-secret-volume\") pod \"collect-profiles-29494635-vjn5n\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.336335 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdddp\" (UniqueName: \"kubernetes.io/projected/2d9bc30a-467a-4e2e-98e3-d66104565f5f-kube-api-access-bdddp\") pod \"collect-profiles-29494635-vjn5n\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.392368 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-t7725_e2e876a2-9fd5-4811-bd40-c3e07276fe2b/operator/0.log" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.456580 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-68fc8c869-jpq7j_6bbf867e-a436-4e97-aaa5-a77f4ab796ee/manager/0.log" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.528617 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:00 crc kubenswrapper[4861]: I0129 09:15:00.773028 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-56f8bfcd9f-rw2h2_30562a65-f3e8-4b37-b722-d1c0a8c03996/manager/0.log" Jan 29 09:15:01 crc kubenswrapper[4861]: I0129 09:15:01.008028 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-564965969-xckfp_79ee9746-9925-45d6-b37b-06ddee279d38/manager/0.log" Jan 29 09:15:01 crc kubenswrapper[4861]: I0129 09:15:01.073583 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n"] Jan 29 09:15:01 crc kubenswrapper[4861]: I0129 09:15:01.103099 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-64b5b76f97-wwnvz_a8e928e7-8173-4e7d-ae37-26eaeec75ccb/manager/0.log" Jan 29 09:15:01 crc kubenswrapper[4861]: I0129 09:15:01.687887 4861 generic.go:334] "Generic (PLEG): container finished" podID="2d9bc30a-467a-4e2e-98e3-d66104565f5f" containerID="1ad011a7cdeee0cf62b494ad54e989507df71ead3ad471c1fe5a0d25e58929c7" exitCode=0 Jan 29 09:15:01 crc kubenswrapper[4861]: I0129 09:15:01.688262 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" event={"ID":"2d9bc30a-467a-4e2e-98e3-d66104565f5f","Type":"ContainerDied","Data":"1ad011a7cdeee0cf62b494ad54e989507df71ead3ad471c1fe5a0d25e58929c7"} Jan 29 09:15:01 crc kubenswrapper[4861]: I0129 09:15:01.688313 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" 
event={"ID":"2d9bc30a-467a-4e2e-98e3-d66104565f5f","Type":"ContainerStarted","Data":"f6e8eb837025f2f60d06d3b5064658eb5806603144cfa991b31edda8f45dc7ef"} Jan 29 09:15:02 crc kubenswrapper[4861]: I0129 09:15:02.768744 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7b54f464f6-95nrq_8f8a9499-a68a-4797-9801-a070bff21b9f/manager/0.log" Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.152681 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.287841 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2d9bc30a-467a-4e2e-98e3-d66104565f5f-secret-volume\") pod \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.287930 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2d9bc30a-467a-4e2e-98e3-d66104565f5f-config-volume\") pod \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.287992 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdddp\" (UniqueName: \"kubernetes.io/projected/2d9bc30a-467a-4e2e-98e3-d66104565f5f-kube-api-access-bdddp\") pod \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\" (UID: \"2d9bc30a-467a-4e2e-98e3-d66104565f5f\") " Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.289045 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d9bc30a-467a-4e2e-98e3-d66104565f5f-config-volume" (OuterVolumeSpecName: "config-volume") pod "2d9bc30a-467a-4e2e-98e3-d66104565f5f" (UID: "2d9bc30a-467a-4e2e-98e3-d66104565f5f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.290890 4861 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2d9bc30a-467a-4e2e-98e3-d66104565f5f-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.294737 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d9bc30a-467a-4e2e-98e3-d66104565f5f-kube-api-access-bdddp" (OuterVolumeSpecName: "kube-api-access-bdddp") pod "2d9bc30a-467a-4e2e-98e3-d66104565f5f" (UID: "2d9bc30a-467a-4e2e-98e3-d66104565f5f"). InnerVolumeSpecName "kube-api-access-bdddp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.306398 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d9bc30a-467a-4e2e-98e3-d66104565f5f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2d9bc30a-467a-4e2e-98e3-d66104565f5f" (UID: "2d9bc30a-467a-4e2e-98e3-d66104565f5f"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.393443 4861 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2d9bc30a-467a-4e2e-98e3-d66104565f5f-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.393487 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdddp\" (UniqueName: \"kubernetes.io/projected/2d9bc30a-467a-4e2e-98e3-d66104565f5f-kube-api-access-bdddp\") on node \"crc\" DevicePath \"\"" Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.708410 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" event={"ID":"2d9bc30a-467a-4e2e-98e3-d66104565f5f","Type":"ContainerDied","Data":"f6e8eb837025f2f60d06d3b5064658eb5806603144cfa991b31edda8f45dc7ef"} Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.708449 4861 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6e8eb837025f2f60d06d3b5064658eb5806603144cfa991b31edda8f45dc7ef" Jan 29 09:15:03 crc kubenswrapper[4861]: I0129 09:15:03.708474 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494635-vjn5n" Jan 29 09:15:04 crc kubenswrapper[4861]: I0129 09:15:04.230879 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"] Jan 29 09:15:04 crc kubenswrapper[4861]: I0129 09:15:04.243376 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494590-z8d7g"] Jan 29 09:15:05 crc kubenswrapper[4861]: I0129 09:15:05.130929 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d76c0b51-8664-4916-9cb4-03cee1a4b2b7" path="/var/lib/kubelet/pods/d76c0b51-8664-4916-9cb4-03cee1a4b2b7/volumes" Jan 29 09:15:22 crc kubenswrapper[4861]: I0129 09:15:22.825622 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-vhjkq_666760cf-9fb4-415b-929e-14212d7cf828/control-plane-machine-set-operator/0.log" Jan 29 09:15:23 crc kubenswrapper[4861]: I0129 09:15:23.027226 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-r2hxt_c67abd0d-bb61-4bd2-a58b-42f2969e1ac1/kube-rbac-proxy/0.log" Jan 29 09:15:23 crc kubenswrapper[4861]: I0129 09:15:23.095011 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-r2hxt_c67abd0d-bb61-4bd2-a58b-42f2969e1ac1/machine-api-operator/0.log" Jan 29 09:15:30 crc kubenswrapper[4861]: I0129 09:15:30.629514 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 09:15:30 crc kubenswrapper[4861]: I0129 09:15:30.630024 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 
Jan 29 09:15:34 crc kubenswrapper[4861]: I0129 09:15:34.148981 4861 scope.go:117] "RemoveContainer" containerID="4ecb1fdafa15c83a3776674b3804e1e0c4324ef847dd7ee8495690710b36a431"
Jan 29 09:15:37 crc kubenswrapper[4861]: I0129 09:15:37.026816 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-545d4d4674-9l5lz_aa8044ac-0d9d-4431-b0b2-cbd3016db3d0/cert-manager-controller/0.log"
Jan 29 09:15:37 crc kubenswrapper[4861]: I0129 09:15:37.123431 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-5545bd876-hzfm6_2fd9d7e9-c75a-4dab-9081-e8314cbf9cfa/cert-manager-cainjector/0.log"
Jan 29 09:15:37 crc kubenswrapper[4861]: I0129 09:15:37.241774 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-6888856db4-r8stv_480a4e6a-51f3-4bb2-bb7f-4dcb9cf582bf/cert-manager-webhook/0.log"
Jan 29 09:15:51 crc kubenswrapper[4861]: I0129 09:15:51.172893 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-xgs7z_4934a549-9f85-465d-88cd-ea90dafe35d8/nmstate-console-plugin/0.log"
Jan 29 09:15:51 crc kubenswrapper[4861]: I0129 09:15:51.260254 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-ttsz5_059322b0-c68b-4446-ae78-5159dfd3606d/nmstate-handler/0.log"
Jan 29 09:15:51 crc kubenswrapper[4861]: I0129 09:15:51.403464 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-dskl8_d9d5a91c-c0a2-4472-a844-482db18355ae/kube-rbac-proxy/0.log"
Jan 29 09:15:51 crc kubenswrapper[4861]: I0129 09:15:51.404232 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-dskl8_d9d5a91c-c0a2-4472-a844-482db18355ae/nmstate-metrics/0.log"
Jan 29 09:15:51 crc kubenswrapper[4861]: I0129 09:15:51.628594 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-8cgg9_32bcf48d-749b-4e22-b0a1-7465f42685be/nmstate-operator/0.log"
Jan 29 09:15:51 crc kubenswrapper[4861]: I0129 09:15:51.659992 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-9cnf6_219139a6-7711-4a83-a2a2-c8901e8195b6/nmstate-webhook/0.log"
Jan 29 09:16:00 crc kubenswrapper[4861]: I0129 09:16:00.630083 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 09:16:00 crc kubenswrapper[4861]: I0129 09:16:00.630642 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 09:16:07 crc kubenswrapper[4861]: I0129 09:16:07.148486 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-w7lll_93f09c4e-4102-44dd-b65f-3aa0088f14f0/prometheus-operator/0.log"
Jan 29 09:16:07 crc kubenswrapper[4861]: I0129 09:16:07.497169 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp_14b0386e-4bd1-484e-a955-b8d3e390529f/prometheus-operator-admission-webhook/0.log"
Jan 29 09:16:07 crc kubenswrapper[4861]: I0129 09:16:07.562582 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd_923475bd-8c9e-4426-b3f5-d632cc1bd3a6/prometheus-operator-admission-webhook/0.log"
Jan 29 09:16:07 crc kubenswrapper[4861]: I0129 09:16:07.781974 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-pfrzn_33374b3a-fbdd-4fc7-8743-a8f979e12ee8/operator/0.log"
Jan 29 09:16:07 crc kubenswrapper[4861]: I0129 09:16:07.826299 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-flrtn_fbda6774-02b4-4961-bc5e-8f022a7ca584/perses-operator/0.log"
Jan 29 09:16:23 crc kubenswrapper[4861]: I0129 09:16:23.338453 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-b58f5_55af9f43-c216-4c63-9ca4-564120262b41/kube-rbac-proxy/0.log"
Jan 29 09:16:23 crc kubenswrapper[4861]: I0129 09:16:23.805945 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-b58f5_55af9f43-c216-4c63-9ca4-564120262b41/controller/0.log"
Jan 29 09:16:23 crc kubenswrapper[4861]: I0129 09:16:23.817026 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-frr-files/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.006424 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-reloader/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.016769 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-reloader/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.034891 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-metrics/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.049023 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-frr-files/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.215989 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-frr-files/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.219306 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-reloader/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.238906 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-metrics/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.267981 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-metrics/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.469657 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-frr-files/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.476756 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-metrics/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.488246 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/cp-reloader/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.520229 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/controller/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.714239 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/kube-rbac-proxy/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.741141 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/frr-metrics/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.760832 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/kube-rbac-proxy-frr/0.log"
Jan 29 09:16:24 crc kubenswrapper[4861]: I0129 09:16:24.954345 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/reloader/0.log"
Jan 29 09:16:25 crc kubenswrapper[4861]: I0129 09:16:25.002758 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-cq4j7_16a3011f-4927-45e1-8748-a8baf0db3e61/frr-k8s-webhook-server/0.log"
Jan 29 09:16:25 crc kubenswrapper[4861]: I0129 09:16:25.196895 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6c8b689f68-d2g2z_e68679d2-955e-4332-8692-ae753a55450c/manager/0.log"
Jan 29 09:16:25 crc kubenswrapper[4861]: I0129 09:16:25.366204 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-b4bd6c689-wxp4r_26677778-6a62-4e26-ae66-d7bab9bcdfe6/webhook-server/0.log"
Jan 29 09:16:25 crc kubenswrapper[4861]: I0129 09:16:25.596664 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-dfg5n_54091384-0c14-42be-a638-a8abb8171ad1/kube-rbac-proxy/0.log"
Jan 29 09:16:26 crc kubenswrapper[4861]: I0129 09:16:26.575501 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-dfg5n_54091384-0c14-42be-a638-a8abb8171ad1/speaker/0.log"
Jan 29 09:16:28 crc kubenswrapper[4861]: I0129 09:16:28.245482 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pwx9r_83f5313a-3181-4f9a-a7b3-9dfbd14719be/frr/0.log"
Jan 29 09:16:30 crc kubenswrapper[4861]: I0129 09:16:30.630160 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 09:16:30 crc kubenswrapper[4861]: I0129 09:16:30.630704 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 09:16:30 crc kubenswrapper[4861]: I0129 09:16:30.630755 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 09:16:30 crc kubenswrapper[4861]: I0129 09:16:30.631662 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c6f1b77eb0a98192a07ee6373d3161e796a4f5d373da3b2d30e4e75fe52c4f99"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 09:16:30 crc kubenswrapper[4861]: I0129 09:16:30.631727 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://c6f1b77eb0a98192a07ee6373d3161e796a4f5d373da3b2d30e4e75fe52c4f99" gracePeriod=600
Jan 29 09:16:31 crc kubenswrapper[4861]: I0129 09:16:31.677401 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="c6f1b77eb0a98192a07ee6373d3161e796a4f5d373da3b2d30e4e75fe52c4f99" exitCode=0
Jan 29 09:16:31 crc kubenswrapper[4861]: I0129 09:16:31.677494 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"c6f1b77eb0a98192a07ee6373d3161e796a4f5d373da3b2d30e4e75fe52c4f99"}
Jan 29 09:16:31 crc kubenswrapper[4861]: I0129 09:16:31.677974 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerStarted","Data":"9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed"}
Jan 29 09:16:31 crc kubenswrapper[4861]: I0129 09:16:31.677997 4861 scope.go:117] "RemoveContainer" containerID="76b71a45b383b034505418caae2adaf8f87f51e906abf29d132cda79f213aa59"
Jan 29 09:16:40 crc kubenswrapper[4861]: I0129 09:16:40.331491 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7_6296013e-e8a8-4e37-b501-77ac33f4652d/util/0.log"
Jan 29 09:16:40 crc kubenswrapper[4861]: I0129 09:16:40.586126 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7_6296013e-e8a8-4e37-b501-77ac33f4652d/util/0.log"
Jan 29 09:16:40 crc kubenswrapper[4861]: I0129 09:16:40.588246 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7_6296013e-e8a8-4e37-b501-77ac33f4652d/pull/0.log"
Jan 29 09:16:40 crc kubenswrapper[4861]: I0129 09:16:40.680294 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7_6296013e-e8a8-4e37-b501-77ac33f4652d/pull/0.log"
Jan 29 09:16:40 crc kubenswrapper[4861]: I0129 09:16:40.914755 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7_6296013e-e8a8-4e37-b501-77ac33f4652d/util/0.log"
Jan 29 09:16:40 crc kubenswrapper[4861]: I0129 09:16:40.916236 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7_6296013e-e8a8-4e37-b501-77ac33f4652d/pull/0.log"
Jan 29 09:16:40 crc kubenswrapper[4861]: I0129 09:16:40.919629 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcbk2f7_6296013e-e8a8-4e37-b501-77ac33f4652d/extract/0.log"
Jan 29 09:16:41 crc kubenswrapper[4861]: I0129 09:16:41.076801 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6_9dede211-65a8-4376-b8cb-2f692702b30d/util/0.log"
Jan 29 09:16:41 crc kubenswrapper[4861]: I0129 09:16:41.253908 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6_9dede211-65a8-4376-b8cb-2f692702b30d/util/0.log"
Jan 29 09:16:41 crc kubenswrapper[4861]: I0129 09:16:41.258245 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6_9dede211-65a8-4376-b8cb-2f692702b30d/pull/0.log"
Jan 29 09:16:41 crc kubenswrapper[4861]: I0129 09:16:41.310958 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6_9dede211-65a8-4376-b8cb-2f692702b30d/pull/0.log"
Jan 29 09:16:41 crc kubenswrapper[4861]: I0129 09:16:41.456311 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6_9dede211-65a8-4376-b8cb-2f692702b30d/util/0.log"
Jan 29 09:16:41 crc kubenswrapper[4861]: I0129 09:16:41.499008 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6_9dede211-65a8-4376-b8cb-2f692702b30d/pull/0.log"
Jan 29 09:16:41 crc kubenswrapper[4861]: I0129 09:16:41.538351 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713drqv6_9dede211-65a8-4376-b8cb-2f692702b30d/extract/0.log"
Jan 29 09:16:41 crc kubenswrapper[4861]: I0129 09:16:41.662797 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg_e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6/util/0.log"
Jan 29 09:16:42 crc kubenswrapper[4861]: I0129 09:16:42.442547 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg_e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6/util/0.log"
Jan 29 09:16:42 crc kubenswrapper[4861]: I0129 09:16:42.576789 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg_e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6/pull/0.log"
Jan 29 09:16:42 crc kubenswrapper[4861]: I0129 09:16:42.628787 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg_e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6/pull/0.log"
Jan 29 09:16:42 crc kubenswrapper[4861]: I0129 09:16:42.767140 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg_e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6/util/0.log"
Jan 29 09:16:42 crc kubenswrapper[4861]: I0129 09:16:42.771764 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg_e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6/pull/0.log"
Jan 29 09:16:42 crc kubenswrapper[4861]: I0129 09:16:42.826514 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5h6zhg_e5513aa6-b5f7-4dbf-bf1f-3e865bd949a6/extract/0.log"
Jan 29 09:16:42 crc kubenswrapper[4861]: I0129 09:16:42.963859 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn_79bf7eee-2a3f-44b1-bcb0-1d1b356ed410/util/0.log"
Jan 29 09:16:43 crc kubenswrapper[4861]: I0129 09:16:43.151429 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn_79bf7eee-2a3f-44b1-bcb0-1d1b356ed410/pull/0.log"
Jan 29 09:16:43 crc kubenswrapper[4861]: I0129 09:16:43.164657 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn_79bf7eee-2a3f-44b1-bcb0-1d1b356ed410/pull/0.log"
Jan 29 09:16:43 crc kubenswrapper[4861]: I0129 09:16:43.206835 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn_79bf7eee-2a3f-44b1-bcb0-1d1b356ed410/util/0.log"
Jan 29 09:16:43 crc kubenswrapper[4861]: I0129 09:16:43.347897 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn_79bf7eee-2a3f-44b1-bcb0-1d1b356ed410/util/0.log"
Jan 29 09:16:43 crc kubenswrapper[4861]: I0129 09:16:43.407279 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn_79bf7eee-2a3f-44b1-bcb0-1d1b356ed410/pull/0.log"
Jan 29 09:16:43 crc kubenswrapper[4861]: I0129 09:16:43.415090 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08thtkn_79bf7eee-2a3f-44b1-bcb0-1d1b356ed410/extract/0.log"
Jan 29 09:16:43 crc kubenswrapper[4861]: I0129 09:16:43.526455 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qzrs9_2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7/extract-utilities/0.log"
Jan 29 09:16:44 crc kubenswrapper[4861]: I0129 09:16:44.238771 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qzrs9_2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7/extract-content/0.log"
Jan 29 09:16:44 crc kubenswrapper[4861]: I0129 09:16:44.240345 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qzrs9_2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7/extract-content/0.log"
Jan 29 09:16:44 crc kubenswrapper[4861]: I0129 09:16:44.258246 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qzrs9_2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7/extract-utilities/0.log"
parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qzrs9_2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7/extract-content/0.log" Jan 29 09:16:44 crc kubenswrapper[4861]: I0129 09:16:44.456839 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lttc5_eb3b12c8-6e96-4fe6-9002-3adf3912a768/extract-utilities/0.log" Jan 29 09:16:44 crc kubenswrapper[4861]: I0129 09:16:44.519184 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qzrs9_2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7/extract-utilities/0.log" Jan 29 09:16:44 crc kubenswrapper[4861]: I0129 09:16:44.711056 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lttc5_eb3b12c8-6e96-4fe6-9002-3adf3912a768/extract-utilities/0.log" Jan 29 09:16:44 crc kubenswrapper[4861]: I0129 09:16:44.767156 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lttc5_eb3b12c8-6e96-4fe6-9002-3adf3912a768/extract-content/0.log" Jan 29 09:16:44 crc kubenswrapper[4861]: I0129 09:16:44.834844 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lttc5_eb3b12c8-6e96-4fe6-9002-3adf3912a768/extract-content/0.log" Jan 29 09:16:44 crc kubenswrapper[4861]: I0129 09:16:44.964794 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lttc5_eb3b12c8-6e96-4fe6-9002-3adf3912a768/extract-utilities/0.log" Jan 29 09:16:45 crc kubenswrapper[4861]: I0129 09:16:45.008132 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lttc5_eb3b12c8-6e96-4fe6-9002-3adf3912a768/extract-content/0.log" Jan 29 09:16:45 crc kubenswrapper[4861]: I0129 09:16:45.274762 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qd79z_72d86b3f-e0da-4302-8327-90fc7eebdf64/marketplace-operator/0.log" Jan 29 09:16:45 crc kubenswrapper[4861]: I0129 09:16:45.365137 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-m5xpq_7ebc3092-6c7f-4514-956c-84cf246fec0e/extract-utilities/0.log" Jan 29 09:16:45 crc kubenswrapper[4861]: I0129 09:16:45.576436 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-m5xpq_7ebc3092-6c7f-4514-956c-84cf246fec0e/extract-utilities/0.log" Jan 29 09:16:45 crc kubenswrapper[4861]: I0129 09:16:45.641611 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-m5xpq_7ebc3092-6c7f-4514-956c-84cf246fec0e/extract-content/0.log" Jan 29 09:16:45 crc kubenswrapper[4861]: I0129 09:16:45.664141 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-m5xpq_7ebc3092-6c7f-4514-956c-84cf246fec0e/extract-content/0.log" Jan 29 09:16:45 crc kubenswrapper[4861]: I0129 09:16:45.804269 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-m5xpq_7ebc3092-6c7f-4514-956c-84cf246fec0e/extract-utilities/0.log" Jan 29 09:16:45 crc kubenswrapper[4861]: I0129 09:16:45.879775 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-m5xpq_7ebc3092-6c7f-4514-956c-84cf246fec0e/extract-content/0.log" Jan 29 09:16:46 crc kubenswrapper[4861]: I0129 09:16:46.118675 4861 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g5x2t_b9207674-08ce-403f-a91d-e3d2649c8dde/extract-utilities/0.log" Jan 29 09:16:46 crc kubenswrapper[4861]: I0129 09:16:46.436404 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g5x2t_b9207674-08ce-403f-a91d-e3d2649c8dde/extract-content/0.log" Jan 29 09:16:46 crc kubenswrapper[4861]: I0129 09:16:46.437982 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g5x2t_b9207674-08ce-403f-a91d-e3d2649c8dde/extract-utilities/0.log" Jan 29 09:16:46 crc kubenswrapper[4861]: I0129 09:16:46.524195 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-m5xpq_7ebc3092-6c7f-4514-956c-84cf246fec0e/registry-server/0.log" Jan 29 09:16:46 crc kubenswrapper[4861]: I0129 09:16:46.528746 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qzrs9_2eb96abf-4c2b-45a9-93c5-5a9504cdc5d7/registry-server/0.log" Jan 29 09:16:46 crc kubenswrapper[4861]: I0129 09:16:46.638229 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g5x2t_b9207674-08ce-403f-a91d-e3d2649c8dde/extract-content/0.log" Jan 29 09:16:46 crc kubenswrapper[4861]: I0129 09:16:46.836643 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g5x2t_b9207674-08ce-403f-a91d-e3d2649c8dde/extract-utilities/0.log" Jan 29 09:16:46 crc kubenswrapper[4861]: I0129 09:16:46.879808 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g5x2t_b9207674-08ce-403f-a91d-e3d2649c8dde/extract-content/0.log" Jan 29 09:16:47 crc kubenswrapper[4861]: I0129 09:16:47.456795 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lttc5_eb3b12c8-6e96-4fe6-9002-3adf3912a768/registry-server/0.log" Jan 29 09:16:48 crc kubenswrapper[4861]: I0129 09:16:48.166272 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-g5x2t_b9207674-08ce-403f-a91d-e3d2649c8dde/registry-server/0.log" Jan 29 09:17:00 crc kubenswrapper[4861]: I0129 09:17:00.023288 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-67c57698d7-x4jfp_14b0386e-4bd1-484e-a955-b8d3e390529f/prometheus-operator-admission-webhook/0.log" Jan 29 09:17:00 crc kubenswrapper[4861]: I0129 09:17:00.061622 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-67c57698d7-jkgsd_923475bd-8c9e-4426-b3f5-d632cc1bd3a6/prometheus-operator-admission-webhook/0.log" Jan 29 09:17:00 crc kubenswrapper[4861]: I0129 09:17:00.064969 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-w7lll_93f09c4e-4102-44dd-b65f-3aa0088f14f0/prometheus-operator/0.log" Jan 29 09:17:00 crc kubenswrapper[4861]: I0129 09:17:00.227211 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-pfrzn_33374b3a-fbdd-4fc7-8743-a8f979e12ee8/operator/0.log" Jan 29 09:17:00 crc kubenswrapper[4861]: I0129 09:17:00.241565 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-flrtn_fbda6774-02b4-4961-bc5e-8f022a7ca584/perses-operator/0.log" 
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.484473 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vprk6"]
Jan 29 09:17:12 crc kubenswrapper[4861]: E0129 09:17:12.485640 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d9bc30a-467a-4e2e-98e3-d66104565f5f" containerName="collect-profiles"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.485658 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d9bc30a-467a-4e2e-98e3-d66104565f5f" containerName="collect-profiles"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.485895 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d9bc30a-467a-4e2e-98e3-d66104565f5f" containerName="collect-profiles"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.487834 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.511995 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vprk6"]
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.552170 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58632105-b529-43b1-b44f-7d042299652a-catalog-content\") pod \"certified-operators-vprk6\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") " pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.552227 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58632105-b529-43b1-b44f-7d042299652a-utilities\") pod \"certified-operators-vprk6\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") " pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.552377 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgb4d\" (UniqueName: \"kubernetes.io/projected/58632105-b529-43b1-b44f-7d042299652a-kube-api-access-lgb4d\") pod \"certified-operators-vprk6\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") " pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.654731 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgb4d\" (UniqueName: \"kubernetes.io/projected/58632105-b529-43b1-b44f-7d042299652a-kube-api-access-lgb4d\") pod \"certified-operators-vprk6\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") " pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.655236 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58632105-b529-43b1-b44f-7d042299652a-catalog-content\") pod \"certified-operators-vprk6\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") " pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.655389 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58632105-b529-43b1-b44f-7d042299652a-utilities\") pod \"certified-operators-vprk6\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") " pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.655767 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58632105-b529-43b1-b44f-7d042299652a-catalog-content\") pod \"certified-operators-vprk6\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") " pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.656012 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58632105-b529-43b1-b44f-7d042299652a-utilities\") pod \"certified-operators-vprk6\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") " pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.676830 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgb4d\" (UniqueName: \"kubernetes.io/projected/58632105-b529-43b1-b44f-7d042299652a-kube-api-access-lgb4d\") pod \"certified-operators-vprk6\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") " pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:12 crc kubenswrapper[4861]: I0129 09:17:12.826039 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:13 crc kubenswrapper[4861]: I0129 09:17:13.569749 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vprk6"]
Jan 29 09:17:13 crc kubenswrapper[4861]: W0129 09:17:13.692353 4861 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58632105_b529_43b1_b44f_7d042299652a.slice/crio-6c051fdd4a1d4ecada397d2da31111cb3a470d7290d69c01fd4a4b85db92f3ff WatchSource:0}: Error finding container 6c051fdd4a1d4ecada397d2da31111cb3a470d7290d69c01fd4a4b85db92f3ff: Status 404 returned error can't find the container with id 6c051fdd4a1d4ecada397d2da31111cb3a470d7290d69c01fd4a4b85db92f3ff
Jan 29 09:17:14 crc kubenswrapper[4861]: I0129 09:17:14.179491 4861 generic.go:334] "Generic (PLEG): container finished" podID="58632105-b529-43b1-b44f-7d042299652a" containerID="11fbfee7513c3007bccc7673b8b8a9a74545ba6e42c6fe8887f96ee02490424a" exitCode=0
Jan 29 09:17:14 crc kubenswrapper[4861]: I0129 09:17:14.179532 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vprk6" event={"ID":"58632105-b529-43b1-b44f-7d042299652a","Type":"ContainerDied","Data":"11fbfee7513c3007bccc7673b8b8a9a74545ba6e42c6fe8887f96ee02490424a"}
Jan 29 09:17:14 crc kubenswrapper[4861]: I0129 09:17:14.179554 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vprk6" event={"ID":"58632105-b529-43b1-b44f-7d042299652a","Type":"ContainerStarted","Data":"6c051fdd4a1d4ecada397d2da31111cb3a470d7290d69c01fd4a4b85db92f3ff"}
Jan 29 09:17:14 crc kubenswrapper[4861]: I0129 09:17:14.181841 4861 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 09:17:15 crc kubenswrapper[4861]: I0129 09:17:15.191242 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vprk6" event={"ID":"58632105-b529-43b1-b44f-7d042299652a","Type":"ContainerStarted","Data":"3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722"}
Jan 29 09:17:18 crc kubenswrapper[4861]: I0129 09:17:18.241875 4861 generic.go:334] "Generic (PLEG): container finished" podID="58632105-b529-43b1-b44f-7d042299652a" containerID="3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722" exitCode=0
Jan 29 09:17:18 crc kubenswrapper[4861]: I0129 09:17:18.241953 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vprk6" event={"ID":"58632105-b529-43b1-b44f-7d042299652a","Type":"ContainerDied","Data":"3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722"}
Jan 29 09:17:21 crc kubenswrapper[4861]: I0129 09:17:21.317321 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vprk6" event={"ID":"58632105-b529-43b1-b44f-7d042299652a","Type":"ContainerStarted","Data":"ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c"}
Jan 29 09:17:21 crc kubenswrapper[4861]: I0129 09:17:21.361919 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vprk6" podStartSLOduration=3.270631535 podStartE2EDuration="9.361902828s" podCreationTimestamp="2026-01-29 09:17:12 +0000 UTC" firstStartedPulling="2026-01-29 09:17:14.181585879 +0000 UTC m=+9725.853080436" lastFinishedPulling="2026-01-29 09:17:20.272857182 +0000 UTC m=+9731.944351729" observedRunningTime="2026-01-29 09:17:21.344933035 +0000 UTC m=+9733.016427592" watchObservedRunningTime="2026-01-29 09:17:21.361902828 +0000 UTC m=+9733.033397385"
Jan 29 09:17:22 crc kubenswrapper[4861]: I0129 09:17:22.827314 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:22 crc kubenswrapper[4861]: I0129 09:17:22.827621 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:22 crc kubenswrapper[4861]: I0129 09:17:22.880298 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:32 crc kubenswrapper[4861]: I0129 09:17:32.935181 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:33 crc kubenswrapper[4861]: I0129 09:17:33.814127 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vprk6"]
Jan 29 09:17:33 crc kubenswrapper[4861]: I0129 09:17:33.815227 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vprk6" podUID="58632105-b529-43b1-b44f-7d042299652a" containerName="registry-server" containerID="cri-o://ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c" gracePeriod=2
Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.401914 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.468307 4861 generic.go:334] "Generic (PLEG): container finished" podID="58632105-b529-43b1-b44f-7d042299652a" containerID="ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c" exitCode=0
Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.468360 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vprk6" event={"ID":"58632105-b529-43b1-b44f-7d042299652a","Type":"ContainerDied","Data":"ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c"}
Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.468378 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vprk6"
Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.468395 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vprk6" event={"ID":"58632105-b529-43b1-b44f-7d042299652a","Type":"ContainerDied","Data":"6c051fdd4a1d4ecada397d2da31111cb3a470d7290d69c01fd4a4b85db92f3ff"}
Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.468416 4861 scope.go:117] "RemoveContainer" containerID="ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c"
Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.486537 4861 scope.go:117] "RemoveContainer" containerID="3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722"
Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.510120 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgb4d\" (UniqueName: \"kubernetes.io/projected/58632105-b529-43b1-b44f-7d042299652a-kube-api-access-lgb4d\") pod \"58632105-b529-43b1-b44f-7d042299652a\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") "
Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.510365 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58632105-b529-43b1-b44f-7d042299652a-utilities\") pod \"58632105-b529-43b1-b44f-7d042299652a\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") "
Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.510482 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58632105-b529-43b1-b44f-7d042299652a-catalog-content\") pod \"58632105-b529-43b1-b44f-7d042299652a\" (UID: \"58632105-b529-43b1-b44f-7d042299652a\") "
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.514704 4861 scope.go:117] "RemoveContainer" containerID="11fbfee7513c3007bccc7673b8b8a9a74545ba6e42c6fe8887f96ee02490424a" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.517493 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58632105-b529-43b1-b44f-7d042299652a-kube-api-access-lgb4d" (OuterVolumeSpecName: "kube-api-access-lgb4d") pod "58632105-b529-43b1-b44f-7d042299652a" (UID: "58632105-b529-43b1-b44f-7d042299652a"). InnerVolumeSpecName "kube-api-access-lgb4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.563743 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58632105-b529-43b1-b44f-7d042299652a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58632105-b529-43b1-b44f-7d042299652a" (UID: "58632105-b529-43b1-b44f-7d042299652a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.611483 4861 scope.go:117] "RemoveContainer" containerID="ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c" Jan 29 09:17:34 crc kubenswrapper[4861]: E0129 09:17:34.612042 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c\": container with ID starting with ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c not found: ID does not exist" containerID="ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.612100 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c"} err="failed to get container status \"ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c\": rpc error: code = NotFound desc = could not find container \"ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c\": container with ID starting with ea50a4c1f0b58feb5e88e410a1f766f9aa97ff40a696b8f12a8b842c6499fc8c not found: ID does not exist" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.612125 4861 scope.go:117] "RemoveContainer" containerID="3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722" Jan 29 09:17:34 crc kubenswrapper[4861]: E0129 09:17:34.612563 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722\": container with ID starting with 3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722 not found: ID does not exist" containerID="3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.612615 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722"} err="failed to get container status \"3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722\": rpc error: code = NotFound desc = could not find container \"3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722\": container with ID starting with 
3d011d12cb0c01e96473dbdf7fbb8c9ae6bf74e78d29a22a7ebf5370e762c722 not found: ID does not exist" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.612650 4861 scope.go:117] "RemoveContainer" containerID="11fbfee7513c3007bccc7673b8b8a9a74545ba6e42c6fe8887f96ee02490424a" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.612762 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58632105-b529-43b1-b44f-7d042299652a-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.612794 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58632105-b529-43b1-b44f-7d042299652a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.612808 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgb4d\" (UniqueName: \"kubernetes.io/projected/58632105-b529-43b1-b44f-7d042299652a-kube-api-access-lgb4d\") on node \"crc\" DevicePath \"\"" Jan 29 09:17:34 crc kubenswrapper[4861]: E0129 09:17:34.613181 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11fbfee7513c3007bccc7673b8b8a9a74545ba6e42c6fe8887f96ee02490424a\": container with ID starting with 11fbfee7513c3007bccc7673b8b8a9a74545ba6e42c6fe8887f96ee02490424a not found: ID does not exist" containerID="11fbfee7513c3007bccc7673b8b8a9a74545ba6e42c6fe8887f96ee02490424a" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.613216 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11fbfee7513c3007bccc7673b8b8a9a74545ba6e42c6fe8887f96ee02490424a"} err="failed to get container status \"11fbfee7513c3007bccc7673b8b8a9a74545ba6e42c6fe8887f96ee02490424a\": rpc error: code = NotFound desc = could not find container \"11fbfee7513c3007bccc7673b8b8a9a74545ba6e42c6fe8887f96ee02490424a\": container with ID starting with 11fbfee7513c3007bccc7673b8b8a9a74545ba6e42c6fe8887f96ee02490424a not found: ID does not exist" Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.814030 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vprk6"] Jan 29 09:17:34 crc kubenswrapper[4861]: I0129 09:17:34.824831 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vprk6"] Jan 29 09:17:35 crc kubenswrapper[4861]: I0129 09:17:35.128654 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58632105-b529-43b1-b44f-7d042299652a" path="/var/lib/kubelet/pods/58632105-b529-43b1-b44f-7d042299652a/volumes" Jan 29 09:18:30 crc kubenswrapper[4861]: I0129 09:18:30.629935 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 09:18:30 crc kubenswrapper[4861]: I0129 09:18:30.630529 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 09:19:00 crc kubenswrapper[4861]: I0129 09:19:00.630220 4861 
Jan 29 09:19:00 crc kubenswrapper[4861]: I0129 09:19:00.630220 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 09:19:00 crc kubenswrapper[4861]: I0129 09:19:00.630735 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 09:19:22 crc kubenswrapper[4861]: I0129 09:19:22.534562 4861 generic.go:334] "Generic (PLEG): container finished" podID="e8494e0d-125e-476c-964c-98d08119fccf" containerID="de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427" exitCode=0
Jan 29 09:19:22 crc kubenswrapper[4861]: I0129 09:19:22.534682 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-59th4/must-gather-k4sl8" event={"ID":"e8494e0d-125e-476c-964c-98d08119fccf","Type":"ContainerDied","Data":"de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427"}
Jan 29 09:19:22 crc kubenswrapper[4861]: I0129 09:19:22.535932 4861 scope.go:117] "RemoveContainer" containerID="de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427"
Jan 29 09:19:23 crc kubenswrapper[4861]: I0129 09:19:23.535309 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-59th4_must-gather-k4sl8_e8494e0d-125e-476c-964c-98d08119fccf/gather/0.log"
Jan 29 09:19:30 crc kubenswrapper[4861]: I0129 09:19:30.630120 4861 patch_prober.go:28] interesting pod/machine-config-daemon-wkh9p container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 09:19:30 crc kubenswrapper[4861]: I0129 09:19:30.630730 4861 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 09:19:30 crc kubenswrapper[4861]: I0129 09:19:30.630791 4861 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p"
Jan 29 09:19:30 crc kubenswrapper[4861]: I0129 09:19:30.632056 4861 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed"} pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 09:19:30 crc kubenswrapper[4861]: I0129 09:19:30.632128 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerName="machine-config-daemon" containerID="cri-o://9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" gracePeriod=600
Jan 29 09:19:30 crc kubenswrapper[4861]: E0129 09:19:30.867341 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 09:19:31 crc kubenswrapper[4861]: I0129 09:19:31.643069 4861 generic.go:334] "Generic (PLEG): container finished" podID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" exitCode=0
Jan 29 09:19:31 crc kubenswrapper[4861]: I0129 09:19:31.643132 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" event={"ID":"5fc70726-e8f8-40d8-b31f-2853e3e856d7","Type":"ContainerDied","Data":"9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed"}
Jan 29 09:19:31 crc kubenswrapper[4861]: I0129 09:19:31.643452 4861 scope.go:117] "RemoveContainer" containerID="c6f1b77eb0a98192a07ee6373d3161e796a4f5d373da3b2d30e4e75fe52c4f99"
Jan 29 09:19:31 crc kubenswrapper[4861]: I0129 09:19:31.644332 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed"
Jan 29 09:19:31 crc kubenswrapper[4861]: E0129 09:19:31.644761 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7"
Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.043481 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-59th4/must-gather-k4sl8"]
Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.043761 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-59th4/must-gather-k4sl8" podUID="e8494e0d-125e-476c-964c-98d08119fccf" containerName="copy" containerID="cri-o://5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609" gracePeriod=2
Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.057067 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-59th4/must-gather-k4sl8"]
Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.542662 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-59th4_must-gather-k4sl8_e8494e0d-125e-476c-964c-98d08119fccf/copy/0.log"
Need to start a new one" pod="openshift-must-gather-59th4/must-gather-k4sl8" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.627845 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e8494e0d-125e-476c-964c-98d08119fccf-must-gather-output\") pod \"e8494e0d-125e-476c-964c-98d08119fccf\" (UID: \"e8494e0d-125e-476c-964c-98d08119fccf\") " Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.627912 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5blt6\" (UniqueName: \"kubernetes.io/projected/e8494e0d-125e-476c-964c-98d08119fccf-kube-api-access-5blt6\") pod \"e8494e0d-125e-476c-964c-98d08119fccf\" (UID: \"e8494e0d-125e-476c-964c-98d08119fccf\") " Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.636983 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8494e0d-125e-476c-964c-98d08119fccf-kube-api-access-5blt6" (OuterVolumeSpecName: "kube-api-access-5blt6") pod "e8494e0d-125e-476c-964c-98d08119fccf" (UID: "e8494e0d-125e-476c-964c-98d08119fccf"). InnerVolumeSpecName "kube-api-access-5blt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.657718 4861 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-59th4_must-gather-k4sl8_e8494e0d-125e-476c-964c-98d08119fccf/copy/0.log" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.658361 4861 generic.go:334] "Generic (PLEG): container finished" podID="e8494e0d-125e-476c-964c-98d08119fccf" containerID="5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609" exitCode=143 Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.658409 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-59th4/must-gather-k4sl8" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.658417 4861 scope.go:117] "RemoveContainer" containerID="5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.731227 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5blt6\" (UniqueName: \"kubernetes.io/projected/e8494e0d-125e-476c-964c-98d08119fccf-kube-api-access-5blt6\") on node \"crc\" DevicePath \"\"" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.733920 4861 scope.go:117] "RemoveContainer" containerID="de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.865867 4861 scope.go:117] "RemoveContainer" containerID="5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609" Jan 29 09:19:32 crc kubenswrapper[4861]: E0129 09:19:32.867355 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609\": container with ID starting with 5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609 not found: ID does not exist" containerID="5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.867414 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609"} err="failed to get container status \"5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609\": rpc error: code = NotFound desc = could not find container \"5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609\": container with ID starting with 5b346b9081c84b3cfeab2702a989f112c71b32cb9a9f2799b10d5a2b9170a609 not found: ID does not exist" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.867447 4861 scope.go:117] "RemoveContainer" containerID="de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427" Jan 29 09:19:32 crc kubenswrapper[4861]: E0129 09:19:32.867865 4861 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427\": container with ID starting with de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427 not found: ID does not exist" containerID="de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.867899 4861 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427"} err="failed to get container status \"de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427\": rpc error: code = NotFound desc = could not find container \"de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427\": container with ID starting with de29a8ce37addb010da099e1085d850bf64f7dcba3dae01b469b2606c0adc427 not found: ID does not exist" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.890158 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8494e0d-125e-476c-964c-98d08119fccf-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "e8494e0d-125e-476c-964c-98d08119fccf" (UID: "e8494e0d-125e-476c-964c-98d08119fccf"). 
InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:19:32 crc kubenswrapper[4861]: I0129 09:19:32.937069 4861 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e8494e0d-125e-476c-964c-98d08119fccf-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 29 09:19:33 crc kubenswrapper[4861]: I0129 09:19:33.133058 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8494e0d-125e-476c-964c-98d08119fccf" path="/var/lib/kubelet/pods/e8494e0d-125e-476c-964c-98d08119fccf/volumes" Jan 29 09:19:44 crc kubenswrapper[4861]: I0129 09:19:44.116453 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:19:44 crc kubenswrapper[4861]: E0129 09:19:44.117259 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:19:56 crc kubenswrapper[4861]: I0129 09:19:56.117387 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:19:56 crc kubenswrapper[4861]: E0129 09:19:56.118629 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:20:11 crc kubenswrapper[4861]: I0129 09:20:11.118582 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:20:11 crc kubenswrapper[4861]: E0129 09:20:11.119510 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:20:22 crc kubenswrapper[4861]: I0129 09:20:22.116955 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:20:22 crc kubenswrapper[4861]: E0129 09:20:22.117743 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:20:33 crc kubenswrapper[4861]: I0129 09:20:33.117549 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:20:33 crc kubenswrapper[4861]: E0129 09:20:33.118486 
4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:20:48 crc kubenswrapper[4861]: I0129 09:20:48.117272 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:20:48 crc kubenswrapper[4861]: E0129 09:20:48.118144 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:20:59 crc kubenswrapper[4861]: I0129 09:20:59.124585 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:20:59 crc kubenswrapper[4861]: E0129 09:20:59.125860 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:21:13 crc kubenswrapper[4861]: I0129 09:21:13.117430 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:21:13 crc kubenswrapper[4861]: E0129 09:21:13.118238 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:21:28 crc kubenswrapper[4861]: I0129 09:21:28.116707 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:21:28 crc kubenswrapper[4861]: E0129 09:21:28.118422 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:21:40 crc kubenswrapper[4861]: I0129 09:21:40.117000 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:21:40 crc kubenswrapper[4861]: E0129 09:21:40.117774 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
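The run of entries above shows the kubelet's periodic sync re-attempting the machine-config-daemon container every 10 to 15 seconds and being refused each time with "back-off 5m0s": the container has crashed often enough that its restart back-off sits at the cap. A minimal sketch of the delay sequence, assuming the kubelet defaults (10 s initial delay, doubled after each crash, capped at 5 m):

    # Crash-loop restart delays under assumed kubelet defaults: 10s initial
    # back-off, doubled after every failed restart, capped at 300s (5m0s).
    # A pod at the cap, like machine-config-daemon-wkh9p here, is revisited
    # by the sync loop but not restarted until the back-off window expires.
    def crashloop_delays(restarts, base=10.0, cap=300.0):
        delay = base
        for _ in range(restarts):
            yield delay
            delay = min(delay * 2, cap)

    print(list(crashloop_delays(7)))  # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0, 300.0]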
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.772016 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fprf7"]
Jan 29 09:21:43 crc kubenswrapper[4861]: E0129 09:21:43.772994 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8494e0d-125e-476c-964c-98d08119fccf" containerName="copy"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.773007 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8494e0d-125e-476c-964c-98d08119fccf" containerName="copy"
Jan 29 09:21:43 crc kubenswrapper[4861]: E0129 09:21:43.773038 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58632105-b529-43b1-b44f-7d042299652a" containerName="extract-utilities"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.773046 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="58632105-b529-43b1-b44f-7d042299652a" containerName="extract-utilities"
Jan 29 09:21:43 crc kubenswrapper[4861]: E0129 09:21:43.773380 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8494e0d-125e-476c-964c-98d08119fccf" containerName="gather"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.773392 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8494e0d-125e-476c-964c-98d08119fccf" containerName="gather"
Jan 29 09:21:43 crc kubenswrapper[4861]: E0129 09:21:43.773409 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58632105-b529-43b1-b44f-7d042299652a" containerName="extract-content"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.773415 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="58632105-b529-43b1-b44f-7d042299652a" containerName="extract-content"
Jan 29 09:21:43 crc kubenswrapper[4861]: E0129 09:21:43.773427 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58632105-b529-43b1-b44f-7d042299652a" containerName="registry-server"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.773432 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="58632105-b529-43b1-b44f-7d042299652a" containerName="registry-server"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.773609 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8494e0d-125e-476c-964c-98d08119fccf" containerName="gather"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.773629 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="58632105-b529-43b1-b44f-7d042299652a" containerName="registry-server"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.773647 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8494e0d-125e-476c-964c-98d08119fccf" containerName="copy"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.775167 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fprf7"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.788370 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fprf7"]
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.947987 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df5dj\" (UniqueName: \"kubernetes.io/projected/d78ac961-2796-4860-88fc-df7f05c8608f-kube-api-access-df5dj\") pod \"community-operators-fprf7\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " pod="openshift-marketplace/community-operators-fprf7"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.950751 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-catalog-content\") pod \"community-operators-fprf7\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " pod="openshift-marketplace/community-operators-fprf7"
Jan 29 09:21:43 crc kubenswrapper[4861]: I0129 09:21:43.951023 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-utilities\") pod \"community-operators-fprf7\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " pod="openshift-marketplace/community-operators-fprf7"
Jan 29 09:21:44 crc kubenswrapper[4861]: I0129 09:21:44.053387 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-df5dj\" (UniqueName: \"kubernetes.io/projected/d78ac961-2796-4860-88fc-df7f05c8608f-kube-api-access-df5dj\") pod \"community-operators-fprf7\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " pod="openshift-marketplace/community-operators-fprf7"
Jan 29 09:21:44 crc kubenswrapper[4861]: I0129 09:21:44.053828 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-catalog-content\") pod \"community-operators-fprf7\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " pod="openshift-marketplace/community-operators-fprf7"
Jan 29 09:21:44 crc kubenswrapper[4861]: I0129 09:21:44.054038 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-utilities\") pod \"community-operators-fprf7\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " pod="openshift-marketplace/community-operators-fprf7"
Jan 29 09:21:44 crc kubenswrapper[4861]: I0129 09:21:44.054349 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-catalog-content\") pod \"community-operators-fprf7\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " pod="openshift-marketplace/community-operators-fprf7"
Jan 29 09:21:44 crc kubenswrapper[4861]: I0129 09:21:44.054405 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-utilities\") pod \"community-operators-fprf7\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " pod="openshift-marketplace/community-operators-fprf7"
Jan 29 09:21:44 crc kubenswrapper[4861]: I0129 09:21:44.093978 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-df5dj\" (UniqueName: \"kubernetes.io/projected/d78ac961-2796-4860-88fc-df7f05c8608f-kube-api-access-df5dj\") pod \"community-operators-fprf7\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " pod="openshift-marketplace/community-operators-fprf7"
Jan 29 09:21:44 crc kubenswrapper[4861]: I0129 09:21:44.105182 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fprf7"
Jan 29 09:21:44 crc kubenswrapper[4861]: I0129 09:21:44.657613 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fprf7"]
Jan 29 09:21:44 crc kubenswrapper[4861]: I0129 09:21:44.981044 4861 generic.go:334] "Generic (PLEG): container finished" podID="d78ac961-2796-4860-88fc-df7f05c8608f" containerID="d4b8971b01a668125a7239d6919e80f22eb37d487892e631cd6cefebf739211c" exitCode=0
Jan 29 09:21:44 crc kubenswrapper[4861]: I0129 09:21:44.981127 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fprf7" event={"ID":"d78ac961-2796-4860-88fc-df7f05c8608f","Type":"ContainerDied","Data":"d4b8971b01a668125a7239d6919e80f22eb37d487892e631cd6cefebf739211c"}
Jan 29 09:21:44 crc kubenswrapper[4861]: I0129 09:21:44.981302 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fprf7" event={"ID":"d78ac961-2796-4860-88fc-df7f05c8608f","Type":"ContainerStarted","Data":"c14bc49a459220561d337e436f806a9464d1400942ae84479f31fba2cac9a5a3"}
Jan 29 09:21:47 crc kubenswrapper[4861]: I0129 09:21:47.003440 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fprf7" event={"ID":"d78ac961-2796-4860-88fc-df7f05c8608f","Type":"ContainerStarted","Data":"268c61571ae0013acfecbcd266c05f22a1f6edc68cd159f9cd8a3163d298d6a3"}
Jan 29 09:21:48 crc kubenswrapper[4861]: I0129 09:21:48.013677 4861 generic.go:334] "Generic (PLEG): container finished" podID="d78ac961-2796-4860-88fc-df7f05c8608f" containerID="268c61571ae0013acfecbcd266c05f22a1f6edc68cd159f9cd8a3163d298d6a3" exitCode=0
Jan 29 09:21:48 crc kubenswrapper[4861]: I0129 09:21:48.013778 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fprf7" event={"ID":"d78ac961-2796-4860-88fc-df7f05c8608f","Type":"ContainerDied","Data":"268c61571ae0013acfecbcd266c05f22a1f6edc68cd159f9cd8a3163d298d6a3"}
Jan 29 09:21:49 crc kubenswrapper[4861]: I0129 09:21:49.024683 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fprf7" event={"ID":"d78ac961-2796-4860-88fc-df7f05c8608f","Type":"ContainerStarted","Data":"ca56f26883130526754a988f220034a555308480c0be09a209a8440d14dffc8b"}
Jan 29 09:21:49 crc kubenswrapper[4861]: I0129 09:21:49.049989 4861 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fprf7" podStartSLOduration=2.543418903 podStartE2EDuration="6.049968771s" podCreationTimestamp="2026-01-29 09:21:43 +0000 UTC" firstStartedPulling="2026-01-29 09:21:44.982278037 +0000 UTC m=+9996.653772594" lastFinishedPulling="2026-01-29 09:21:48.488827905 +0000 UTC m=+10000.160322462" observedRunningTime="2026-01-29 09:21:49.042321087 +0000 UTC m=+10000.713815664" watchObservedRunningTime="2026-01-29 09:21:49.049968771 +0000 UTC m=+10000.721463328"
probe="readiness" status="" pod="openshift-marketplace/community-operators-fprf7" Jan 29 09:21:54 crc kubenswrapper[4861]: I0129 09:21:54.106125 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fprf7" Jan 29 09:21:54 crc kubenswrapper[4861]: I0129 09:21:54.528507 4861 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fprf7" Jan 29 09:21:55 crc kubenswrapper[4861]: I0129 09:21:55.117859 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:21:55 crc kubenswrapper[4861]: E0129 09:21:55.118116 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:21:55 crc kubenswrapper[4861]: I0129 09:21:55.136159 4861 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fprf7" Jan 29 09:21:55 crc kubenswrapper[4861]: I0129 09:21:55.190778 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fprf7"] Jan 29 09:21:57 crc kubenswrapper[4861]: I0129 09:21:57.097582 4861 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fprf7" podUID="d78ac961-2796-4860-88fc-df7f05c8608f" containerName="registry-server" containerID="cri-o://ca56f26883130526754a988f220034a555308480c0be09a209a8440d14dffc8b" gracePeriod=2 Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.124604 4861 generic.go:334] "Generic (PLEG): container finished" podID="d78ac961-2796-4860-88fc-df7f05c8608f" containerID="ca56f26883130526754a988f220034a555308480c0be09a209a8440d14dffc8b" exitCode=0 Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.124666 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fprf7" event={"ID":"d78ac961-2796-4860-88fc-df7f05c8608f","Type":"ContainerDied","Data":"ca56f26883130526754a988f220034a555308480c0be09a209a8440d14dffc8b"} Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.368230 4861 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fprf7" Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.481496 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-utilities\") pod \"d78ac961-2796-4860-88fc-df7f05c8608f\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.481647 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-catalog-content\") pod \"d78ac961-2796-4860-88fc-df7f05c8608f\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.481692 4861 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-df5dj\" (UniqueName: \"kubernetes.io/projected/d78ac961-2796-4860-88fc-df7f05c8608f-kube-api-access-df5dj\") pod \"d78ac961-2796-4860-88fc-df7f05c8608f\" (UID: \"d78ac961-2796-4860-88fc-df7f05c8608f\") " Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.482425 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-utilities" (OuterVolumeSpecName: "utilities") pod "d78ac961-2796-4860-88fc-df7f05c8608f" (UID: "d78ac961-2796-4860-88fc-df7f05c8608f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.487553 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d78ac961-2796-4860-88fc-df7f05c8608f-kube-api-access-df5dj" (OuterVolumeSpecName: "kube-api-access-df5dj") pod "d78ac961-2796-4860-88fc-df7f05c8608f" (UID: "d78ac961-2796-4860-88fc-df7f05c8608f"). InnerVolumeSpecName "kube-api-access-df5dj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.535460 4861 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d78ac961-2796-4860-88fc-df7f05c8608f" (UID: "d78ac961-2796-4860-88fc-df7f05c8608f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.584145 4861 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.584475 4861 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d78ac961-2796-4860-88fc-df7f05c8608f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 09:21:58 crc kubenswrapper[4861]: I0129 09:21:58.584557 4861 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-df5dj\" (UniqueName: \"kubernetes.io/projected/d78ac961-2796-4860-88fc-df7f05c8608f-kube-api-access-df5dj\") on node \"crc\" DevicePath \"\"" Jan 29 09:21:59 crc kubenswrapper[4861]: I0129 09:21:59.137545 4861 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fprf7" event={"ID":"d78ac961-2796-4860-88fc-df7f05c8608f","Type":"ContainerDied","Data":"c14bc49a459220561d337e436f806a9464d1400942ae84479f31fba2cac9a5a3"} Jan 29 09:21:59 crc kubenswrapper[4861]: I0129 09:21:59.137969 4861 scope.go:117] "RemoveContainer" containerID="ca56f26883130526754a988f220034a555308480c0be09a209a8440d14dffc8b" Jan 29 09:21:59 crc kubenswrapper[4861]: I0129 09:21:59.137696 4861 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fprf7" Jan 29 09:21:59 crc kubenswrapper[4861]: I0129 09:21:59.166691 4861 scope.go:117] "RemoveContainer" containerID="268c61571ae0013acfecbcd266c05f22a1f6edc68cd159f9cd8a3163d298d6a3" Jan 29 09:21:59 crc kubenswrapper[4861]: I0129 09:21:59.175016 4861 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fprf7"] Jan 29 09:21:59 crc kubenswrapper[4861]: I0129 09:21:59.187108 4861 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fprf7"] Jan 29 09:21:59 crc kubenswrapper[4861]: I0129 09:21:59.211835 4861 scope.go:117] "RemoveContainer" containerID="d4b8971b01a668125a7239d6919e80f22eb37d487892e631cd6cefebf739211c" Jan 29 09:22:01 crc kubenswrapper[4861]: I0129 09:22:01.128717 4861 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d78ac961-2796-4860-88fc-df7f05c8608f" path="/var/lib/kubelet/pods/d78ac961-2796-4860-88fc-df7f05c8608f/volumes" Jan 29 09:22:06 crc kubenswrapper[4861]: I0129 09:22:06.116597 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:22:06 crc kubenswrapper[4861]: E0129 09:22:06.118336 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:22:20 crc kubenswrapper[4861]: I0129 09:22:20.117296 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:22:20 crc kubenswrapper[4861]: E0129 09:22:20.118116 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:22:35 crc kubenswrapper[4861]: I0129 09:22:35.116624 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:22:35 crc kubenswrapper[4861]: E0129 09:22:35.118238 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:22:47 crc kubenswrapper[4861]: I0129 09:22:47.117128 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:22:47 crc kubenswrapper[4861]: E0129 09:22:47.118318 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:23:02 crc kubenswrapper[4861]: I0129 09:23:02.117532 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:23:02 crc kubenswrapper[4861]: E0129 09:23:02.118357 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:23:16 crc kubenswrapper[4861]: I0129 09:23:16.116783 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:23:16 crc kubenswrapper[4861]: E0129 09:23:16.117780 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:23:27 crc kubenswrapper[4861]: I0129 09:23:27.117082 4861 scope.go:117] "RemoveContainer" containerID="9af43d59a9dc9ef3863044bb715765bf1d3a359dc135df275cc54b1672adb7ed" Jan 29 09:23:27 crc kubenswrapper[4861]: E0129 09:23:27.117799 4861 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-wkh9p_openshift-machine-config-operator(5fc70726-e8f8-40d8-b31f-2853e3e856d7)\"" pod="openshift-machine-config-operator/machine-config-daemon-wkh9p" podUID="5fc70726-e8f8-40d8-b31f-2853e3e856d7" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.113806 4861 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ts5r9"] Jan 29 09:23:35 crc kubenswrapper[4861]: E0129 09:23:35.115006 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d78ac961-2796-4860-88fc-df7f05c8608f" containerName="registry-server" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.115021 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d78ac961-2796-4860-88fc-df7f05c8608f" containerName="registry-server" Jan 29 09:23:35 crc kubenswrapper[4861]: E0129 09:23:35.115046 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d78ac961-2796-4860-88fc-df7f05c8608f" containerName="extract-content" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.115052 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d78ac961-2796-4860-88fc-df7f05c8608f" containerName="extract-content" Jan 29 09:23:35 crc kubenswrapper[4861]: E0129 09:23:35.115094 4861 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d78ac961-2796-4860-88fc-df7f05c8608f" containerName="extract-utilities" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.115102 4861 state_mem.go:107] "Deleted CPUSet assignment" podUID="d78ac961-2796-4860-88fc-df7f05c8608f" containerName="extract-utilities" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.115288 4861 memory_manager.go:354] "RemoveStaleState removing state" podUID="d78ac961-2796-4860-88fc-df7f05c8608f" containerName="registry-server" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.117123 4861 util.go:30] "No sandbox for pod can be found. 
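Before admitting redhat-marketplace-ts5r9, the CPU and memory managers first purge the checkpointed assignments left behind by the deleted community-operators-fprf7 containers, which is what the RemoveStaleState entries above record. A toy model of that cleanup (the data structures are illustrative, not kubelet's actual ones):

    # Illustrative stand-in for the RemoveStaleState pass: resource-manager
    # state is keyed by (podUID, containerName), and entries whose pod is no
    # longer active are dropped before a new pod is admitted.
    def remove_stale_state(assignments, active_pods):
        for pod_uid, container in [k for k in assignments if k[0] not in active_pods]:
            print(f"removing container podUID={pod_uid} containerName={container}")
            del assignments[(pod_uid, container)]

    cpusets = {
        ("d78ac961-2796-4860-88fc-df7f05c8608f", "registry-server"): "0-3",
        ("3754cc72-623a-4c07-9c66-1f83dc2a1e32", "registry-server"): "0-3",
    }
    remove_stale_state(cpusets, active_pods={"3754cc72-623a-4c07-9c66-1f83dc2a1e32"})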
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ts5r9" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.142215 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ts5r9"] Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.210771 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3754cc72-623a-4c07-9c66-1f83dc2a1e32-utilities\") pod \"redhat-marketplace-ts5r9\" (UID: \"3754cc72-623a-4c07-9c66-1f83dc2a1e32\") " pod="openshift-marketplace/redhat-marketplace-ts5r9" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.211744 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtd6d\" (UniqueName: \"kubernetes.io/projected/3754cc72-623a-4c07-9c66-1f83dc2a1e32-kube-api-access-gtd6d\") pod \"redhat-marketplace-ts5r9\" (UID: \"3754cc72-623a-4c07-9c66-1f83dc2a1e32\") " pod="openshift-marketplace/redhat-marketplace-ts5r9" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.211855 4861 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3754cc72-623a-4c07-9c66-1f83dc2a1e32-catalog-content\") pod \"redhat-marketplace-ts5r9\" (UID: \"3754cc72-623a-4c07-9c66-1f83dc2a1e32\") " pod="openshift-marketplace/redhat-marketplace-ts5r9" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.314087 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtd6d\" (UniqueName: \"kubernetes.io/projected/3754cc72-623a-4c07-9c66-1f83dc2a1e32-kube-api-access-gtd6d\") pod \"redhat-marketplace-ts5r9\" (UID: \"3754cc72-623a-4c07-9c66-1f83dc2a1e32\") " pod="openshift-marketplace/redhat-marketplace-ts5r9" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.314188 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3754cc72-623a-4c07-9c66-1f83dc2a1e32-catalog-content\") pod \"redhat-marketplace-ts5r9\" (UID: \"3754cc72-623a-4c07-9c66-1f83dc2a1e32\") " pod="openshift-marketplace/redhat-marketplace-ts5r9" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.314327 4861 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3754cc72-623a-4c07-9c66-1f83dc2a1e32-utilities\") pod \"redhat-marketplace-ts5r9\" (UID: \"3754cc72-623a-4c07-9c66-1f83dc2a1e32\") " pod="openshift-marketplace/redhat-marketplace-ts5r9" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.314760 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3754cc72-623a-4c07-9c66-1f83dc2a1e32-catalog-content\") pod \"redhat-marketplace-ts5r9\" (UID: \"3754cc72-623a-4c07-9c66-1f83dc2a1e32\") " pod="openshift-marketplace/redhat-marketplace-ts5r9" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.314798 4861 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3754cc72-623a-4c07-9c66-1f83dc2a1e32-utilities\") pod \"redhat-marketplace-ts5r9\" (UID: \"3754cc72-623a-4c07-9c66-1f83dc2a1e32\") " pod="openshift-marketplace/redhat-marketplace-ts5r9" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.583632 4861 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gtd6d\" (UniqueName: \"kubernetes.io/projected/3754cc72-623a-4c07-9c66-1f83dc2a1e32-kube-api-access-gtd6d\") pod \"redhat-marketplace-ts5r9\" (UID: \"3754cc72-623a-4c07-9c66-1f83dc2a1e32\") " pod="openshift-marketplace/redhat-marketplace-ts5r9" Jan 29 09:23:35 crc kubenswrapper[4861]: I0129 09:23:35.755823 4861 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ts5r9" Jan 29 09:23:36 crc kubenswrapper[4861]: I0129 09:23:36.216488 4861 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ts5r9"] var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515136623444024455 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015136623444017372 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015136577251016521 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015136577252015472 5ustar corecore